| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 87 to 55.2k | int64 0 to 349 | stringlengths 135 to 49.1k | int64 0 to 349 | int64 0 to 1 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowerCamelCase_ ( ):
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--model_ckpt" , type=lowerCamelCase__ , default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs" , type=lowerCamelCase__ , default=5 )
parser.add_argument("--batch_size" , type=lowerCamelCase__ , default=6 )
parser.add_argument("--gradient_accumulation_steps" , type=lowerCamelCase__ , default=1 )
parser.add_argument("--freeze" , type=lowerCamelCase__ , default=lowerCamelCase__ )
parser.add_argument("--learning_rate" , type=lowerCamelCase__ , default=5e-4 )
parser.add_argument("--seed" , type=lowerCamelCase__ , default=0 )
parser.add_argument("--lr_scheduler_type" , type=lowerCamelCase__ , default="cosine" )
parser.add_argument("--num_warmup_steps" , type=lowerCamelCase__ , default=1_0 )
parser.add_argument("--weight_decay" , type=lowerCamelCase__ , default=0.01 )
parser.add_argument("--output_dir" , type=lowerCamelCase__ , default="./results" )
return parser.parse_args()
__A =load('''accuracy''')
def lowerCamelCase_ ( lowerCamelCase__ ):
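# eval_pred is a (logits, labels) pair from the Trainer; take the argmax over the 7 complexity classes and score accuracy.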
lowerCamelCase_ , lowerCamelCase_ = eval_pred
lowerCamelCase_ = np.argmax(lowerCamelCase__ , axis=1 )
return metric.compute(predictions=lowerCamelCase__ , references=lowerCamelCase__ )
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase ) -> None:
super().__init__()
lowerCamelCase_ = trainer
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , **lowercase ) -> Dict:
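# Whenever an evaluation is triggered, also evaluate on the training split so its metrics are logged under the "train" prefix.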
if control.should_evaluate:
lowerCamelCase_ = deepcopy(lowercase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def lowerCamelCase_ ( ):
lowerCamelCase_ = get_args()
set_seed(args.seed )
lowerCamelCase_ = load_dataset("codeparrot/codecomplex" , split="train" )
lowerCamelCase_ = dataset.train_test_split(test_size=0.2 )
lowerCamelCase_ = train_test["test"].train_test_split(test_size=0.5 )
lowerCamelCase_ = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
lowerCamelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase_ = tokenizer.eos_token
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
lowerCamelCase_ = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
lowerCamelCase_ = False
lowerCamelCase_ = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(lowerCamelCase__ ):
lowerCamelCase_ = tokenizer(example["src"] , truncation=lowerCamelCase__ , max_length=1_0_2_4 )
lowerCamelCase_ = labels.straint(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
lowerCamelCase_ = train_test_validation.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=train_test_validation["train"].column_names , )
lowerCamelCase_ = DataCollatorWithPadding(tokenizer=lowerCamelCase__ )
lowerCamelCase_ = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , )
lowerCamelCase_ = Trainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=lowerCamelCase__ , data_collator=lowerCamelCase__ , compute_metrics=lowerCamelCase__ , )
print("Training..." )
trainer.add_callback(CustomCallback(lowerCamelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
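The `--freeze` option above disables gradient updates for the UniXcoder encoder (a RoBERTa model, hence `model.roberta`) so that only the classification head is trained. A minimal, readable sketch of that step, using descriptive names in place of the obfuscated ones above:

```python
from transformers import AutoModelForSequenceClassification

# 7-way complexity classifier on top of the UniXcoder encoder (the script's default checkpoint).
model = AutoModelForSequenceClassification.from_pretrained(
    "microsoft/unixcoder-base-nine", num_labels=7
)

# Freeze the encoder: only the classification head receives gradient updates.
for param in model.roberta.parameters():
    param.requires_grad = False
```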
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
__A ='''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
super().__init__()
self.register_modules(
prior=lowercase , image_encoder=lowercase , image_processor=lowercase , scheduler=lowercase , renderer=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
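# Draw fresh noise (or validate user-supplied latents) and scale by the scheduler's initial noise sigma.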
if latents is None:
lowerCamelCase_ = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase_ = latents.to(lowercase )
lowerCamelCase_ = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE_( self , lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device(f'cuda:{gpu_id}' )
lowerCamelCase_ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
if isinstance(lowercase , lowercase ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase_ = torch.cat(lowercase , axis=0 ) if image[0].ndim == 4 else torch.stack(lowercase , axis=0 )
if not isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = self.image_processor(lowercase , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
lowerCamelCase_ = image.to(dtype=self.image_encoder.dtype , device=lowercase )
lowerCamelCase_ = self.image_encoder(lowercase )["last_hidden_state"]
lowerCamelCase_ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase_ = image_embeds.repeat_interleave(lowercase , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_ = torch.zeros_like(lowercase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and image embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowercase )
def __call__( self , lowercase , lowercase = 1 , lowercase = 25 , lowercase = None , lowercase = None , lowercase = 4.0 , lowercase = 64 , lowercase = "pil" , lowercase = True , ) -> Union[str, Any]:
if isinstance(lowercase , PIL.Image.Image ):
lowerCamelCase_ = 1
elif isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = image.shape[0]
elif isinstance(lowercase , lowercase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase_ = len(lowercase )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowercase )}' )
lowerCamelCase_ = self._execution_device
lowerCamelCase_ = batch_size * num_images_per_prompt
lowerCamelCase_ = guidance_scale > 1.0
lowerCamelCase_ = self._encode_image(lowercase , lowercase , lowercase , lowercase )
# prior
self.scheduler.set_timesteps(lowercase , device=lowercase )
lowerCamelCase_ = self.scheduler.timesteps
lowerCamelCase_ = self.prior.config.num_embeddings
lowerCamelCase_ = self.prior.config.embedding_dim
lowerCamelCase_ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowercase , lowercase , lowercase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase_ = latents.reshape(latents.shape[0] , lowercase , lowercase )
for i, t in enumerate(self.progress_bar(lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(lowercase , lowercase )
lowerCamelCase_ = self.prior(
lowercase , timestep=lowercase , proj_embedding=lowercase , ).predicted_image_embedding
# remove the variance
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
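# Classifier-free guidance: push the conditional prediction away from the unconditional one by guidance_scale.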
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase_ = self.scheduler.step(
lowercase , timestep=lowercase , sample=lowercase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowercase )
lowerCamelCase_ = []
for i, latent in enumerate(lowercase ):
print()
lowerCamelCase_ = self.renderer.decode(
latent[None, :] , lowercase , size=lowercase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(lowercase )
lowerCamelCase_ = torch.stack(lowercase )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowerCamelCase_ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase_ = [self.numpy_to_pil(lowercase ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowercase )
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = BioGptTokenizer
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
lowerCamelCase_ = dict(zip(lowercase , range(len(lowercase ) ) ) )
lowerCamelCase_ = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(lowercase ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(lowercase ) )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Any:
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = "lower newer"
return input_text, output_text
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = BioGptTokenizer(self.vocab_file , self.merges_file )
lowerCamelCase_ = "lower"
lowerCamelCase_ = ["low", "er</w>"]
lowerCamelCase_ = tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
lowerCamelCase_ = tokens + ["<unk>"]
lowerCamelCase_ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
lowerCamelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=lowercase )
lowerCamelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=lowercase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(lowercase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase_ ( ):
lowerCamelCase_ , lowerCamelCase_ = 9, 1_4 # noqa: F841
lowerCamelCase_ = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
lowerCamelCase_ = defaultdict(lowerCamelCase__ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
lowerCamelCase_ = mst(lowerCamelCase__ )
lowerCamelCase_ = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
lowerCamelCase_ = tuple(answer[:2] )
lowerCamelCase_ = tuple(edge[::-1] )
assert edge in result or reverse in result
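The assertions above check the output of `prisms_algorithm` from `graphs.minimum_spanning_tree_prims` against a known minimum spanning tree, with the adjacency map storing `[neighbour, cost]` pairs in both directions. For reference, a minimal sketch of Prim's algorithm over that same adjacency format; this is not the imported implementation, and returning edges as `(u, v)` tuples is an assumption:

```python
import heapq

def prim_mst(adjacency, start=0):
    """Return MST edges as (u, v) tuples, given adjacency[u] = [[v, cost], ...]."""
    visited = {start}
    # heap of (cost, u, v): the cheapest edge leaving the visited set comes first
    heap = [(cost, start, neighbour) for neighbour, cost in adjacency[start]]
    heapq.heapify(heap)
    mst_edges = set()
    while heap:
        cost, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        mst_edges.add((u, v))
        for neighbour, edge_cost in adjacency[v]:
            if neighbour not in visited:
                heapq.heappush(heap, (edge_cost, v, neighbour))
    return mst_edges
```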
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
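# A text summarization tool wrapping philschmid/bart-large-cnn-samsum: tokenize the input text, generate with the seq2seq model, decode the summary.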
lowerCAmelCase__ = 'philschmid/bart-large-cnn-samsum'
lowerCAmelCase__ = (
'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
'and returns a summary of the text.'
)
lowerCAmelCase__ = 'summarizer'
lowerCAmelCase__ = AutoTokenizer
lowerCAmelCase__ = AutoModelForSeqaSeqLM
lowerCAmelCase__ = ['text']
lowerCAmelCase__ = ['text']
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Optional[int]:
return self.pre_processor(lowercase , return_tensors="pt" , truncation=lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[Any]:
return self.model.generate(**lowercase )[0]
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> str:
return self.pre_processor.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__A =1_6
__A =3_2
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 1_6 ):
lowerCamelCase_ = AutoTokenizer.from_pretrained("bert-base-cased" )
lowerCamelCase_ = load_dataset("glue" , "mrpc" )
def tokenize_function(lowerCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ = datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowerCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ = 1_6
elif accelerator.mixed_precision != "no":
lowerCamelCase_ = 8
else:
lowerCamelCase_ = None
return tokenizer.pad(
lowerCamelCase__ , padding="longest" , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors="pt" , )
# Instantiate dataloaders.
lowerCamelCase_ = DataLoader(
tokenized_datasets["train"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
lowerCamelCase_ = DataLoader(
tokenized_datasets["validation"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__A =mocked_dataloaders # noqa: F811
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCamelCase__ ) == "1":
lowerCamelCase_ = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowerCamelCase_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
lowerCamelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ = config["lr"]
lowerCamelCase_ = int(config["num_epochs"] )
lowerCamelCase_ = int(config["seed"] )
lowerCamelCase_ = int(config["batch_size"] )
set_seed(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
lowerCamelCase_ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase_ = batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase_ = MAX_GPU_BATCH_SIZE
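# The effective batch size is unchanged: per-device batch size * gradient_accumulation_steps equals the configured batch size (assuming it divides evenly).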
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ = model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ = AdamW(params=model.parameters() , lr=lowerCamelCase__ )
# Instantiate scheduler
lowerCamelCase_ = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowerCamelCase_ = os.path.split(lowerCamelCase__ )[-1].split("." )[0]
accelerator.init_trackers(lowerCamelCase__ , lowerCamelCase__ )
# Now we train the model
for epoch in range(lowerCamelCase__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowerCamelCase_ = 0
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowerCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.logits.argmax(dim=-1 )
lowerCamelCase_ , lowerCamelCase_ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowerCamelCase__ , references=lowerCamelCase__ , )
lowerCamelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(lowerCamelCase__ ),
"epoch": epoch,
} , step=lowerCamelCase__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase_ ( ):
lowerCamelCase_ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=lowerCamelCase__ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = {"lr": 2e-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
main()
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ):
lowerCamelCase_ = "backbone." if is_semantic else ""
lowerCamelCase_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'{prefix}blocks.{i}.norm1.weight', F'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'{prefix}blocks.{i}.norm1.bias', F'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'{prefix}blocks.{i}.attn.proj.weight', F'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'{prefix}blocks.{i}.attn.proj.bias', F'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'{prefix}blocks.{i}.norm2.weight', F'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'{prefix}blocks.{i}.norm2.bias', F'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.weight', F'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc1.bias', F'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.weight', F'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'{prefix}blocks.{i}.mlp.fc2.bias', F'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'{prefix}cls_token', "beit.embeddings.cls_token"),
(F'{prefix}patch_embed.proj.weight', "beit.embeddings.patch_embeddings.projection.weight"),
(F'{prefix}patch_embed.proj.bias', "beit.embeddings.patch_embeddings.projection.bias"),
(F'{prefix}pos_embed', "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False , lowerCamelCase__=False ):
for i in range(config.num_hidden_layers ):
lowerCamelCase_ = "backbone." if is_semantic else ""
# queries, keys and values
lowerCamelCase_ = state_dict.pop(F'{prefix}blocks.{i}.attn.qkv.weight' )
lowerCamelCase_ = state_dict.pop(F'{prefix}blocks.{i}.attn.q_bias' )
lowerCamelCase_ = state_dict.pop(F'{prefix}blocks.{i}.attn.v_bias' )
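# The fused qkv projection is split into equal thirds: rows [:hidden_size] -> query, [hidden_size:2*hidden_size] -> key, [-hidden_size:] -> value.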
lowerCamelCase_ = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase_ = q_bias
lowerCamelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase_ = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase_ = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowerCamelCase_ = state_dict.pop(F'{prefix}blocks.{i}.gamma_1' )
lowerCamelCase_ = state_dict.pop(F'{prefix}blocks.{i}.gamma_2' )
lowerCamelCase_ = gamma_a
lowerCamelCase_ = gamma_a
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = dct.pop(lowerCamelCase__ )
lowerCamelCase_ = val
def lowerCamelCase_ ( ):
lowerCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase_ = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
lowerCamelCase_ = False if "rvlcdip" in checkpoint_url else True
lowerCamelCase_ = BeitConfig(use_absolute_position_embeddings=lowerCamelCase__ , use_mask_token=lowerCamelCase__ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowerCamelCase_ = 1_0_2_4
lowerCamelCase_ = 4_0_9_6
lowerCamelCase_ = 2_4
lowerCamelCase_ = 1_6
# labels
if "rvlcdip" in checkpoint_url:
lowerCamelCase_ = 1_6
lowerCamelCase_ = "huggingface/label-files"
lowerCamelCase_ = "rvlcdip-id2label.json"
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowerCamelCase_ = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )["model"]
lowerCamelCase_ = create_rename_keys(lowerCamelCase__ , has_lm_head=lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ , has_lm_head=lowerCamelCase__ )
# load HuggingFace model
lowerCamelCase_ = BeitForMaskedImageModeling(lowerCamelCase__ ) if has_lm_head else BeitForImageClassification(lowerCamelCase__ )
model.eval()
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image
lowerCamelCase_ = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase__ )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=lowerCamelCase__ , return_tensors="pt" )
lowerCamelCase_ = encoding["pixel_values"]
lowerCamelCase_ = model(lowerCamelCase__ )
lowerCamelCase_ = outputs.logits
# verify logits
lowerCamelCase_ = [1, 1_6] if "rvlcdip" in checkpoint_url else [1, 1_9_6, 8_1_9_2]
assert logits.shape == torch.Size(lowerCamelCase__ ), "Shape of logits not as expected"
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
if has_lm_head:
lowerCamelCase_ = "dit-base" if "base" in checkpoint_url else "dit-large"
else:
lowerCamelCase_ = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCamelCase__ , lowerCamelCase__ ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=lowerCamelCase__ , )
model.push_to_hub(
repo_path_or_name=Path(lowerCamelCase__ , lowerCamelCase__ ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=lowerCamelCase__ , )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
__A =parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__A =None
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__A ={
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
__A ={
'''facebook/mbart-large-en-ro''': 1_0_2_4,
'''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
__A =['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = MBartTokenizer
lowerCAmelCase__ = []
lowerCAmelCase__ = []
def __init__( self , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=None , lowercase=None , lowercase=None , **lowercase , ) -> Dict:
# Mask token behaves like a normal word, i.e. includes the space before it
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
super().__init__(
vocab_file=lowercase , tokenizer_file=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , src_lang=lowercase , tgt_lang=lowercase , additional_special_tokens=lowercase , **lowercase , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = False if not self.vocab_file else True
lowerCamelCase_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
lowerCamelCase_ = {
lang_code: self.convert_tokens_to_ids(lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase_ = src_lang if src_lang is not None else "en_XX"
lowerCamelCase_ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE_( self ) -> str:
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , **lowercase ) -> List[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase_ = src_lang
lowerCamelCase_ = self(lowercase , add_special_tokens=lowercase , return_tensors=lowercase , **lowercase )
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = "en_XX" , lowercase = None , lowercase = "ro_RO" , **lowercase , ) -> BatchEncoding:
lowerCamelCase_ = src_lang
lowerCamelCase_ = tgt_lang
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
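# Source-language setting: no prefix tokens, suffix is [eos, src_lang_code].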
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
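# Target-language setting mirrors the source: no prefix tokens, suffix is [eos, tgt_lang_code].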
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase_ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,)
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = tempfile.mkdtemp()
# fmt: off
lowerCamelCase_ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCamelCase_ = dict(zip(lowercase , range(len(lowercase ) ) ) )
lowerCamelCase_ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCamelCase_ = {"unk_token": "<unk>"}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase ) )
lowerCamelCase_ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , lowercase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> Optional[int]:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> List[str]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase )
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> Union[str, Any]:
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
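# Build a single random 3x30x400 uint8 array and convert it to a PIL image (channels moved last) for the processor tests.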
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase )
lowerCamelCase_ = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase )
self.assertIsInstance(processor_fast.tokenizer , lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase )
self.assertIsInstance(processor_fast.image_processor , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCamelCase_ = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
lowerCamelCase_ = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(lowercase , return_tensors="np" )
lowerCamelCase_ = processor(images=lowercase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = processor(text=lowercase )
lowerCamelCase_ = tokenizer(lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(lowercase )
lowerCamelCase_ = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = CLIPProcessor(tokenizer=lowercase , image_processor=lowercase )
lowerCamelCase_ = "lower newer"
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__A =pytest.mark.integration
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(lowercase ) for x in np.arange(30 ).tolist()]} )
return dset
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
lowerCamelCase_ = dset.map(
lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowercase , keep_in_memory=lowercase )
lowerCamelCase_ = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(lowercase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
from elasticsearch import Elasticsearch
lowerCamelCase_ = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
lowerCamelCase_ = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=lowercase )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
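# Index the rows of a 5x5 identity matrix; with the inner-product metric, a one-hot query scores highest against its own row.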
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertRaises(lowercase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase_ = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
self.assertRaises(lowercase , index.search_batch , queries[0] )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
import faiss
lowerCamelCase_ = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase_ = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowercase ):
lowerCamelCase_ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = faiss.IndexFlat(5 )
lowerCamelCase_ = FaissIndex(custom_index=lowercase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase_ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase_ ( lowerCamelCase__ ):
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase_ = "index.faiss"
lowerCamelCase_ = F'mock://{index_name}'
index.save(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = FaissIndex.load(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowerCamelCase__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = Elasticsearch()
lowerCamelCase_ = {"acknowledged": True}
lowerCamelCase_ = ElasticSearchIndex(es_client=lowercase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
# batched queries with timeout
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase , request_timeout=30 )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
import pytest
__A ='''__dummy_dataset1__'''
__A ='''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def lowerCamelCase_ ( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def lowerCamelCase_ ( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = dataset_loading_script_name
lowerCamelCase_ = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=lowerCamelCase__ )
lowerCamelCase_ = script_dir / F'{script_name}.py'
with open(lowerCamelCase__ , "w" ) as f:
f.write(lowerCamelCase__ )
return str(lowerCamelCase__ )
| 19 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> List[str]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
lowerCamelCase_ = self.vocab_size - 1
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , head_mask=lowercase )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = OpenAIGPTLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTDoubleHeadsModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = OpenAIGPTForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.prepare_config_and_inputs()
        lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCAmelCase__ = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=False ) -> Any:
lowerCamelCase_ = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase , )
lowerCamelCase_ = inputs_dict["labels"]
lowerCamelCase_ = inputs_dict["labels"]
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase , )
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = OpenAIGPTModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , n_embd=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Any:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = OpenAIGPTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(lowercase )
lowerCamelCase_ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase ) # the president is
lowerCamelCase_ = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase_ = model.generate(lowercase , do_sample=lowercase )
self.assertListEqual(output_ids[0].tolist() , lowercase )
| 19 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , ) -> Optional[int]:
lowerCamelCase_ = parent
lowerCamelCase_ = 13
lowerCamelCase_ = 7
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = 99
lowerCamelCase_ = 32
lowerCamelCase_ = 2
lowerCamelCase_ = 4
lowerCamelCase_ = 37
lowerCamelCase_ = "gelu"
lowerCamelCase_ = 0.1
lowerCamelCase_ = 0.1
lowerCamelCase_ = 512
lowerCamelCase_ = 16
lowerCamelCase_ = 2
lowerCamelCase_ = 0.0_2
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = None
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ = True
lowerCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Any:
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCamelCase_ = True
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase , encoder_hidden_states=lowercase )
# Also check the case where encoder outputs are not passed
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
lowerCamelCase_ = TFEsmForMaskedLM(config=lowercase )
lowerCamelCase_ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFEsmForTokenClassification(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.prepare_config_and_inputs()
        lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = TFEsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFEsmModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
pass
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
pass
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(lowercase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCamelCase_ = model.get_bias()
assert isinstance(lowercase , lowercase )
for k, v in name.items():
assert isinstance(lowercase , tf.Variable )
else:
lowerCamelCase_ = model.get_output_embeddings()
assert x is None
lowerCamelCase_ = model.get_bias()
assert name is None
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(lowercase )[0]
lowerCamelCase_ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , lowercase )
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(lowercase )[0]
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 19 |
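# Digit fifth powers (Project Euler problem 30): sum all numbers that equal the sum of the fifth powers of their digits.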
__A ={str(digit): digit**5 for digit in range(1_0)}
def lowerCamelCase_ ( lowerCamelCase__ ):
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(lowerCamelCase__ ) )
def lowerCamelCase_ ( ):
return sum(
number
for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
if number == digits_fifth_powers_sum(lowerCamelCase__ ) )
if __name__ == "__main__":
print(solution())
| 19 | 1 |
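# Repunit divisibility (Project Euler problem 129): A(n) is the smallest k for which the repunit R(k) = (10**k - 1) // 9 is divisible by n; find the smallest n whose A(n) exceeds the one-million limit.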
def lowerCamelCase_ ( lowerCamelCase__ ):
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
lowerCamelCase_ = 1
lowerCamelCase_ = 1
while repunit:
lowerCamelCase_ = (1_0 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def lowerCamelCase_ ( lowerCamelCase__ = 1_0_0_0_0_0_0 ):
lowerCamelCase_ = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(lowerCamelCase__ ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 19 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase_ = 1_9_2
lowerCamelCase_ = 7_6_8
lowerCamelCase_ = 1_2
lowerCamelCase_ = 3
lowerCamelCase_ = [8_0_0, 1_3_3_3]
lowerCamelCase_ = False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = 3_3_0
lowerCamelCase_ = 1_4
lowerCamelCase_ = 6
lowerCamelCase_ = 1_3_2_0
elif "yolos_s" in yolos_name:
lowerCamelCase_ = 3_8_4
lowerCamelCase_ = 1_5_3_6
lowerCamelCase_ = 1_2
lowerCamelCase_ = 6
elif "yolos_b" in yolos_name:
lowerCamelCase_ = [8_0_0, 1_3_4_4]
lowerCamelCase_ = 9_1
lowerCamelCase_ = "huggingface/label-files"
lowerCamelCase_ = "coco-detection-id2label.json"
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowerCamelCase_ = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase_ = in_proj_bias[: config.hidden_size]
lowerCamelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase_ = in_proj_weight[-config.hidden_size :, :]
lowerCamelCase_ = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_ ( lowerCamelCase__ ):
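    # Translate a parameter name from the original YOLOS/ViT checkpoint to the Hugging Face naming scheme.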
if "backbone" in name:
lowerCamelCase_ = name.replace("backbone" , "vit" )
if "cls_token" in name:
lowerCamelCase_ = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowerCamelCase_ = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowerCamelCase_ = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowerCamelCase_ = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowerCamelCase_ = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCamelCase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase_ = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowerCamelCase_ = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowerCamelCase_ = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowerCamelCase_ = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
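    # Split each layer's fused qkv projection into separate query/key/value slices while copying the remaining entries over.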
for key in orig_state_dict.copy().keys():
lowerCamelCase_ = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowerCamelCase_ = key.split("." )
lowerCamelCase_ = int(key_split[2] )
lowerCamelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase_ = val[:dim, :]
lowerCamelCase_ = val[
dim : dim * 2, :
]
lowerCamelCase_ = val[-dim:, :]
else:
lowerCamelCase_ = val[:dim]
lowerCamelCase_ = val[dim : dim * 2]
lowerCamelCase_ = val[-dim:]
else:
lowerCamelCase_ = val
return orig_state_dict
def lowerCamelCase_ ( ):
lowerCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase_ = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
lowerCamelCase_ = get_yolos_config(lowerCamelCase__ )
# load original state_dict
lowerCamelCase_ = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]
# load 🤗 model
lowerCamelCase_ = YolosForObjectDetection(lowerCamelCase__ )
model.eval()
lowerCamelCase_ = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase_ = 8_0_0 if yolos_name != "yolos_ti" else 5_1_2
lowerCamelCase_ = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ )
lowerCamelCase_ = image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = outputs.logits, outputs.pred_boxes
lowerCamelCase_ , lowerCamelCase_ = None, None
if yolos_name == "yolos_ti":
lowerCamelCase_ = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
lowerCamelCase_ = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase_ = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
lowerCamelCase_ = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase_ = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
lowerCamelCase_ = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
lowerCamelCase_ = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
lowerCamelCase_ = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
lowerCamelCase_ = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
lowerCamelCase_ = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCamelCase_ = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" )
model.push_to_hub(lowerCamelCase__ , organization="hustvl" )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__A =parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 19 | 1 |
from __future__ import annotations
from math import pi, sqrt
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 19 |
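# Compute the binomial coefficient C(n, r) with a single rolling row of Pascal's triangle (O(n * r) time, O(r) space).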
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = [0 for i in range(r + 1 )]
# nc0 = 1
lowerCamelCase_ = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
lowerCamelCase_ = min(lowerCamelCase__ , lowerCamelCase__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=1_0, r=5))
| 19 | 1 |
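# Test for Prim's minimum-spanning-tree algorithm: build an undirected weighted graph as an adjacency list and check that every expected MST edge is returned (in either direction).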
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase_ ( ):
lowerCamelCase_ , lowerCamelCase_ = 9, 1_4 # noqa: F841
lowerCamelCase_ = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
lowerCamelCase_ = defaultdict(lowerCamelCase__ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
lowerCamelCase_ = mst(lowerCamelCase__ )
lowerCamelCase_ = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
lowerCamelCase_ = tuple(answer[:2] )
lowerCamelCase_ = tuple(edge[::-1] )
assert edge in result or reverse in result
| 19 |
import math
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
return y * math.logaa(lowerCamelCase__ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
__A ='''Enter the base and the power separated by a comma: '''
__A, __A =map(int, input(prompt).split(''','''))
__A, __A =map(int, input(prompt).split(''','''))
# We find the log of each number, using the function res(), which takes two
# arguments.
__A =res(xa, ya)
__A =res(xa, ya)
# We check for the largest number
if resa > resa:
print('''Largest number is''', xa, '''^''', ya)
elif resa > resa:
print('''Largest number is''', xa, '''^''', ya)
else:
print('''Both are equal''')
| 19 | 1 |
__A ={}
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
lowerCamelCase_ = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
lowerCamelCase_ = _calculate(days - 1 , lowerCamelCase__ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
lowerCamelCase_ = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
lowerCamelCase_ = _calculate(days - 1 , lowerCamelCase__ , 0 )
lowerCamelCase_ = state_late + state_absent + state_ontime
lowerCamelCase_ = prizestrings
return prizestrings
def lowerCamelCase_ ( lowerCamelCase__ = 3_0 ):
return _calculate(lowerCamelCase__ , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 19 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
__A =logging.get_logger(__name__)
__A =list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__A =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Model type selected in the list: ' + ', '.join(snake_case_ )} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase__ = field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase__ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase__ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase__ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'train'
lowerCAmelCase__ = 'dev'
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , lowercase , lowercase , lowercase = None , lowercase = Split.train , lowercase = False , lowercase = None , lowercase = "pt" , ) -> List[str]:
lowerCamelCase_ = args
lowerCamelCase_ = is_language_sensitive
lowerCamelCase_ = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowercase , lowercase ):
try:
lowerCamelCase_ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowerCamelCase_ = mode
# Load data features from cache or dataset file
lowerCamelCase_ = "v2" if args.version_2_with_negative else "v1"
lowerCamelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ = cached_features_file + ".lock"
with FileLock(lowercase ):
if os.path.exists(lowercase ) and not args.overwrite_cache:
lowerCamelCase_ = time.time()
lowerCamelCase_ = torch.load(lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ = self.old_features["features"]
lowerCamelCase_ = self.old_features.get("dataset" , lowercase )
lowerCamelCase_ = self.old_features.get("examples" , lowercase )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
lowerCamelCase_ = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ = self.processor.get_train_examples(args.data_dir )
lowerCamelCase_ , lowerCamelCase_ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=lowercase , )
lowerCamelCase_ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self , lowercase ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
lowerCamelCase_ = self.features[i]
lowerCamelCase_ = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase_ = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ = torch.tensor(feature.start_position , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 19 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A ={'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 19 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
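    # Abstract base for CLI commands: subclasses provide a static method that registers their argparse sub-command and an instance method that runs it.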
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE_( lowercase ) -> int:
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
raise NotImplementedError()
| 19 | 1 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_0_0 * 2**2_0, 6_0_0 * 2**2_0] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_0_0 * 2**2_0, 9_0_0 * 2**2_0] )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , lowerCamelCase__ )
lowerCamelCase_ = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
lowerCamelCase_ = dataset_size < in_memory_max_size
else:
lowerCamelCase_ = False
lowerCamelCase_ = is_small_dataset(lowerCamelCase__ )
assert result == expected
| 19 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
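# Text-guided inpainting pipeline: CLIPSeg predicts a segmentation mask for the text query, and that mask is then fed to a Stable Diffusion inpainting pipeline.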
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[Any]:
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = 1
lowerCamelCase_ = FrozenDict(lowercase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = True
lowerCamelCase_ = FrozenDict(lowercase )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=lowercase , segmentation_processor=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
self.enable_attention_slicing(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , lowercase , lowercase , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> int:
lowerCamelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowerCamelCase_ = self.segmentation_model(**lowercase )
lowerCamelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowerCamelCase_ = self.numpy_to_pil(lowercase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowerCamelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowercase , image=lowercase , mask_image=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , )
| 19 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
__A ={
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__A =[
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
for attribute in key.split("." ):
lowerCamelCase_ = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
lowerCamelCase_ = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
lowerCamelCase_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
lowerCamelCase_ = value
elif weight_type == "weight_g":
lowerCamelCase_ = value
elif weight_type == "weight_v":
lowerCamelCase_ = value
elif weight_type == "bias":
lowerCamelCase_ = value
elif weight_type == "running_mean":
lowerCamelCase_ = value
elif weight_type == "running_var":
lowerCamelCase_ = value
elif weight_type == "num_batches_tracked":
lowerCamelCase_ = value
elif weight_type == "inv_freq":
lowerCamelCase_ = value
else:
lowerCamelCase_ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
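    # Walk the fairseq state dict and copy every tensor into the matching Hugging Face Wav2Vec2-Conformer parameter, using the MAPPING table defined above.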
lowerCamelCase_ = []
lowerCamelCase_ = fairseq_model.state_dict()
lowerCamelCase_ = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase_ = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == "group" , )
lowerCamelCase_ = True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase_ = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowerCamelCase_ = True
if "*" in mapped_key:
lowerCamelCase_ = name.split(lowerCamelCase__ )[0].split("." )[-2]
lowerCamelCase_ = mapped_key.replace("*" , lowerCamelCase__ )
if "pos_bias_u" in name:
lowerCamelCase_ = None
elif "pos_bias_v" in name:
lowerCamelCase_ = None
elif "weight_g" in name:
lowerCamelCase_ = "weight_g"
elif "weight_v" in name:
lowerCamelCase_ = "weight_v"
elif "bias" in name:
lowerCamelCase_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase_ = "weight"
elif "running_mean" in name:
lowerCamelCase_ = "running_mean"
elif "inv_freq" in name:
lowerCamelCase_ = "inv_freq"
elif "running_var" in name:
lowerCamelCase_ = "running_var"
elif "num_batches_tracked" in name:
lowerCamelCase_ = "num_batches_tracked"
else:
lowerCamelCase_ = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F'Unused weights: {unused_weights}' )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = full_name.split("conv_layers." )[-1]
lowerCamelCase_ = name.split("." )
lowerCamelCase_ = int(items[0] )
lowerCamelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
lowerCamelCase_ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
lowerCamelCase_ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
lowerCamelCase_ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
lowerCamelCase_ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCamelCase__ )
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True ):
if config_path is not None:
lowerCamelCase_ = WavaVecaConformerConfig.from_pretrained(lowerCamelCase__ , hidden_act="swish" )
else:
lowerCamelCase_ = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowerCamelCase_ = "rotary"
if is_finetuned:
if dict_path:
lowerCamelCase_ = Dictionary.load(lowerCamelCase__ )
        # Important: change the bos & pad token ids, since the CTC symbol is <pad> and
        # not <s> as in fairseq
lowerCamelCase_ = target_dict.pad_index
lowerCamelCase_ = target_dict.bos_index
lowerCamelCase_ = target_dict.eos_index
lowerCamelCase_ = len(target_dict.symbols )
lowerCamelCase_ = os.path.join(lowerCamelCase__ , "vocab.json" )
if not os.path.isdir(lowerCamelCase__ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCamelCase__ ) )
return
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
lowerCamelCase_ = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCamelCase_ = 0
lowerCamelCase_ = 1
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = WavaVecaCTCTokenizer(
lowerCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowerCamelCase__ , )
lowerCamelCase_ = True if config.feat_extract_norm == "layer" else False
lowerCamelCase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
lowerCamelCase_ = WavaVecaProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
lowerCamelCase_ = WavaVecaConformerForCTC(lowerCamelCase__ )
else:
lowerCamelCase_ = WavaVecaConformerForPreTraining(lowerCamelCase__ )
if is_finetuned:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
lowerCamelCase_ = argparse.Namespace(task="audio_pretraining" )
lowerCamelCase_ = fairseq.tasks.setup_task(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCamelCase__ )
lowerCamelCase_ = model[0].eval()
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''If set, the checkpoint to convert is a pretrained (not fine-tuned) model'''
)
__A =parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
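# Hypothetical invocation sketch (the script filename and every path below are illustrative
# placeholders, not taken from the source):
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-hf \
#       --dict_path /path/to/dict.ltr.txt \
#       --config_path ./config.json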
| 19 |
from collections import deque
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = len(lowerCamelCase__ )
lowerCamelCase_ = deque()
lowerCamelCase_ = [False for _ in range(lowerCamelCase__ )]
lowerCamelCase_ = [-1 for _ in range(lowerCamelCase__ )]
lowerCamelCase_ = index_of[:]
def strong_connect(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = index # the number when this node is seen
lowerCamelCase_ = index # lowest rank node reachable from here
index += 1
stack.append(lowerCamelCase__ )
lowerCamelCase_ = True
for w in g[v]:
if index_of[w] == -1:
lowerCamelCase_ = strong_connect(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
lowerCamelCase_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
lowerCamelCase_ = []
lowerCamelCase_ = stack.pop()
lowerCamelCase_ = False
component.append(lowerCamelCase__ )
while w != v:
lowerCamelCase_ = stack.pop()
lowerCamelCase_ = False
component.append(lowerCamelCase__ )
components.append(lowerCamelCase__ )
return index
lowerCamelCase_ = []
for v in range(lowerCamelCase__ ):
if index_of[v] == -1:
strong_connect(lowerCamelCase__ , 0 , lowerCamelCase__ )
return components
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = [[] for _ in range(lowerCamelCase__ )]
for u, v in edges:
g[u].append(lowerCamelCase__ )
return g
if __name__ == "__main__":
# Test
__A =7
__A =[0, 0, 1, 2, 3, 3, 4, 4, 6]
__A =[1, 3, 2, 0, 1, 4, 5, 6, 5]
__A =[(u, v) for u, v in zip(source, target)]
__A =create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
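    # A second, minimal sanity check (a sketch reusing the `create_graph`/`tarjan` names
    # referenced by the test above): a single 3-cycle collapses into one strongly
    # connected component, popped off the stack in reverse push order.
    assert [[2, 1, 0]] == tarjan(create_graph(3, [(0, 1), (1, 2), (2, 0)]))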
| 19 | 1 |
def lowerCamelCase_ ( lowerCamelCase__ ):
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = credit_card_number
lowerCamelCase_ = 0
lowerCamelCase_ = len(lowerCamelCase__ ) - 2
for i in range(lowerCamelCase__ , -1 , -2 ):
# double the value of every second digit
lowerCamelCase_ = int(cc_number[i] )
digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
if digit > 9:
digit %= 1_0
digit += 1
lowerCamelCase_ = cc_number[:i] + str(lowerCamelCase__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(lowerCamelCase__ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 1_0 == 0
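# Worked example (an informal sketch): for "4111111111111111", every second digit from the
# right is doubled, turning the leading 4 into 8 and seven of the 1s into 2s; together with
# the eight untouched 1s the digit sum is 8 + 7 * 2 + 8 = 30, which is divisible by 10, so
# the Luhn check above passes.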
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = F'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(F'{error_message} it has nonnumerical characters.' )
return False
if not 1_3 <= len(lowerCamelCase__ ) <= 1_6:
print(F'{error_message} of its length.' )
return False
if not validate_initial_digits(lowerCamelCase__ ):
print(F'{error_message} of its first two digits.' )
return False
if not luhn_validation(lowerCamelCase__ ):
print(F'{error_message} it fails the Luhn check.' )
return False
print(F'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('''4111111111111111''')
validate_credit_card_number('''32323''')
| 19 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A ={'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 19 | 1 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__A =logging.get_logger(__name__)
# General docstring
__A ='''MobileNetV1Config'''
# Base docstring
__A ='''google/mobilenet_v1_1.0_224'''
__A =[1, 1_0_2_4, 7, 7]
# Image classification docstring
__A ='''google/mobilenet_v1_1.0_224'''
__A ='''tabby, tabby cat'''
__A =[
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
lowerCamelCase_ = {}
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = model.mobilenet_va
else:
lowerCamelCase_ = model
lowerCamelCase_ = "MobilenetV1/Conv2d_0/"
lowerCamelCase_ = backbone.conv_stem.convolution.weight
lowerCamelCase_ = backbone.conv_stem.normalization.bias
lowerCamelCase_ = backbone.conv_stem.normalization.weight
lowerCamelCase_ = backbone.conv_stem.normalization.running_mean
lowerCamelCase_ = backbone.conv_stem.normalization.running_var
for i in range(1_3 ):
lowerCamelCase_ = i + 1
lowerCamelCase_ = i * 2
lowerCamelCase_ = backbone.layer[pt_index]
lowerCamelCase_ = F'MobilenetV1/Conv2d_{tf_index}_depthwise/'
lowerCamelCase_ = pointer.convolution.weight
lowerCamelCase_ = pointer.normalization.bias
lowerCamelCase_ = pointer.normalization.weight
lowerCamelCase_ = pointer.normalization.running_mean
lowerCamelCase_ = pointer.normalization.running_var
lowerCamelCase_ = backbone.layer[pt_index + 1]
lowerCamelCase_ = F'MobilenetV1/Conv2d_{tf_index}_pointwise/'
lowerCamelCase_ = pointer.convolution.weight
lowerCamelCase_ = pointer.normalization.bias
lowerCamelCase_ = pointer.normalization.weight
lowerCamelCase_ = pointer.normalization.running_mean
lowerCamelCase_ = pointer.normalization.running_var
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = "MobilenetV1/Logits/Conv2d_1c_1x1/"
lowerCamelCase_ = model.classifier.weight
lowerCamelCase_ = model.classifier.bias
return tf_to_pt_map
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions." )
raise
# Load weights from TF model
lowerCamelCase_ = tf.train.list_variables(lowerCamelCase__ )
lowerCamelCase_ = {}
for name, shape in init_vars:
logger.info(F'Loading TF weight {name} with shape {shape}' )
lowerCamelCase_ = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = array
# Build TF to PyTorch weights loading map
lowerCamelCase_ = _build_tf_to_pytorch_map(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for name, pointer in tf_to_pt_map.items():
logger.info(F'Importing {name}' )
if name not in tf_weights:
logger.info(F'{name} not in tf pre-trained weights, skipping' )
continue
lowerCamelCase_ = tf_weights[name]
if "depthwise_weights" in name:
logger.info("Transposing depthwise" )
lowerCamelCase_ = np.transpose(lowerCamelCase__ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("Transposing" )
if len(pointer.shape ) == 2: # copying into linear layer
lowerCamelCase_ = array.squeeze().transpose()
else:
lowerCamelCase_ = np.transpose(lowerCamelCase__ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' )
logger.info(F'Initialize PyTorch weight {name} {array.shape}' )
lowerCamelCase_ = torch.from_numpy(lowerCamelCase__ )
tf_weights.pop(lowerCamelCase__ , lowerCamelCase__ )
tf_weights.pop(name + "/RMSProp" , lowerCamelCase__ )
tf_weights.pop(name + "/RMSProp_1" , lowerCamelCase__ )
tf_weights.pop(name + "/ExponentialMovingAverage" , lowerCamelCase__ )
logger.info(F'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' )
return model
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ , lowerCamelCase_ = features.shape[-2:]
lowerCamelCase_ , lowerCamelCase_ = conv_layer.stride
lowerCamelCase_ , lowerCamelCase_ = conv_layer.kernel_size
if in_height % stride_height == 0:
lowerCamelCase_ = max(kernel_height - stride_height , 0 )
else:
lowerCamelCase_ = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
lowerCamelCase_ = max(kernel_width - stride_width , 0 )
else:
lowerCamelCase_ = max(kernel_width - (in_width % stride_width) , 0 )
lowerCamelCase_ = pad_along_width // 2
lowerCamelCase_ = pad_along_width - pad_left
lowerCamelCase_ = pad_along_height // 2
lowerCamelCase_ = pad_along_height - pad_top
lowerCamelCase_ = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(lowerCamelCase__ , lowerCamelCase__ , "constant" , 0.0 )
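# Worked example of the "SAME"-style padding arithmetic above (a sketch): for a 7x7 feature
# map with a 3x3 kernel and stride 2, in_height % stride_height == 1, so pad_along_height =
# max(3 - 1, 0) = 2, split into pad_top = 1 and pad_bottom = 1 (and likewise for the width),
# mirroring the TensorFlow "SAME" padding that `config.tf_padding` requests.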
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase = 1 , lowercase = 1 , lowercase = False , lowercase = True , lowercase = True , ) -> None:
super().__init__()
lowerCamelCase_ = config
if in_channels % groups != 0:
raise ValueError(f'Input channels ({in_channels}) are not divisible by {groups} groups.' )
if out_channels % groups != 0:
raise ValueError(f'Output channels ({out_channels}) are not divisible by {groups} groups.' )
lowerCamelCase_ = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
lowerCamelCase_ = nn.Convad(
in_channels=lowercase , out_channels=lowercase , kernel_size=lowercase , stride=lowercase , padding=lowercase , groups=lowercase , bias=lowercase , padding_mode="zeros" , )
if use_normalization:
lowerCamelCase_ = nn.BatchNormad(
num_features=lowercase , eps=config.layer_norm_eps , momentum=0.9_9_9_7 , affine=lowercase , track_running_stats=lowercase , )
else:
lowerCamelCase_ = None
if use_activation:
if isinstance(lowercase , lowercase ):
lowerCamelCase_ = ACTaFN[use_activation]
elif isinstance(config.hidden_act , lowercase ):
lowerCamelCase_ = ACTaFN[config.hidden_act]
else:
lowerCamelCase_ = config.hidden_act
else:
lowerCamelCase_ = None
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> torch.Tensor:
if self.config.tf_padding:
lowerCamelCase_ = apply_tf_padding(lowercase , self.convolution )
lowerCamelCase_ = self.convolution(lowercase )
if self.normalization is not None:
lowerCamelCase_ = self.normalization(lowercase )
if self.activation is not None:
lowerCamelCase_ = self.activation(lowercase )
return features
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = MobileNetVaConfig
lowerCAmelCase__ = load_tf_weights_in_mobilenet_va
lowerCAmelCase__ = 'mobilenet_v1'
lowerCAmelCase__ = 'pixel_values'
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
if isinstance(lowercase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowercase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
__A =R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__A =R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.' , snake_case_ , )
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase = True ) -> Optional[Any]:
super().__init__(lowercase )
lowerCamelCase_ = config
lowerCamelCase_ = 32
lowerCamelCase_ = max(int(depth * config.depth_multiplier ) , config.min_depth )
lowerCamelCase_ = MobileNetVaConvLayer(
lowercase , in_channels=config.num_channels , out_channels=lowercase , kernel_size=3 , stride=2 , )
lowerCamelCase_ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
lowerCamelCase_ = nn.ModuleList()
for i in range(13 ):
lowerCamelCase_ = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
lowerCamelCase_ = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
lowercase , in_channels=lowercase , out_channels=lowercase , kernel_size=3 , stride=strides[i] , groups=lowercase , ) )
self.layer.append(
MobileNetVaConvLayer(
lowercase , in_channels=lowercase , out_channels=lowercase , kernel_size=1 , ) )
lowerCamelCase_ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Optional[int]:
raise NotImplementedError
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_( self , lowercase = None , lowercase = None , lowercase = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
lowerCamelCase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
lowerCamelCase_ = self.conv_stem(lowercase )
lowerCamelCase_ = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
lowerCamelCase_ = layer_module(lowercase )
if output_hidden_states:
lowerCamelCase_ = all_hidden_states + (hidden_states,)
lowerCamelCase_ = hidden_states
if self.pooler is not None:
lowerCamelCase_ = torch.flatten(self.pooler(lowercase ) , start_dim=1 )
else:
lowerCamelCase_ = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=lowercase , )
@add_start_docstrings(
'\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , snake_case_ , )
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase ) -> None:
super().__init__(lowercase )
lowerCamelCase_ = config.num_labels
lowerCamelCase_ = MobileNetVaModel(lowercase )
lowerCamelCase_ = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
lowerCamelCase_ = nn.Dropout(config.classifier_dropout_prob , inplace=lowercase )
lowerCamelCase_ = nn.Linear(lowercase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
lowerCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase_ = self.mobilenet_va(lowercase , output_hidden_states=lowercase , return_dict=lowercase )
lowerCamelCase_ = outputs.pooler_output if return_dict else outputs[1]
lowerCamelCase_ = self.classifier(self.dropout(lowercase ) )
lowerCamelCase_ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCamelCase_ = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCamelCase_ = "single_label_classification"
else:
lowerCamelCase_ = "multi_label_classification"
if self.config.problem_type == "regression":
lowerCamelCase_ = MSELoss()
if self.num_labels == 1:
lowerCamelCase_ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCamelCase_ = loss_fct(lowercase , lowercase )
elif self.config.problem_type == "single_label_classification":
lowerCamelCase_ = CrossEntropyLoss()
lowerCamelCase_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCamelCase_ = BCEWithLogitsLoss()
lowerCamelCase_ = loss_fct(lowercase , lowercase )
if not return_dict:
lowerCamelCase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states , )
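# Minimal usage sketch (hedged: assumes the classes above map onto the usual
# MobileNetV1ForImageClassification / AutoImageProcessor pairing, reusing the
# "google/mobilenet_v1_1.0_224" checkpoint named in _IMAGE_CLASS_CHECKPOINT above):
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   logits = model(**processor(images=image, return_tensors="pt")).logits
#   predicted_label = model.config.id2label[logits.argmax(-1).item()]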
| 19 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A ={'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" )
lowerCamelCase_ = {
"input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.intaa ), # "My dog is cute"
"attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
lowerCamelCase_ = model(lowercase )["last_hidden_state"]
lowerCamelCase_ = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , lowercase )
# compare the actual values for a slice.
lowerCamelCase_ = tf.convert_to_tensor(
[
[
[0.0_6_8_1_7_6_2, 0.1_0_8_9_4_4_5_1, 0.0_6_7_7_2_5_0_4],
[-0.0_6_4_2_3_6_6_8, 0.0_2_3_6_6_6_1_5, 0.0_4_3_2_9_3_4_4],
[-0.0_6_0_5_7_2_9_5, 0.0_9_9_7_4_1_3_5, -0.0_0_0_7_0_5_8_4],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 19 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__A ='''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__A =concatenate_datasets
__A =DownloadConfig
__A =DownloadManager
__A =DownloadMode
__A =DownloadConfig
__A =DownloadMode
__A =DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
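# Minimal usage sketch of the public API re-exported by this __init__ (the dataset name is
# an illustrative example, not taken from the source):
#   from datasets import load_dataset
#   ds = load_dataset("glue", "mrpc", split="train")
#   ds[0]  # a single example as a plain dict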
| 19 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__A =logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , **lowercase ) -> str:
lowerCamelCase_ = feature_size
lowerCamelCase_ = sampling_rate
lowerCamelCase_ = padding_value
lowerCamelCase_ = kwargs.pop("padding_side" , "right" )
lowerCamelCase_ = kwargs.pop("return_attention_mask" , lowercase )
super().__init__(**lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = True , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowercase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
lowerCamelCase_ = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
f' to this method that includes {self.model_input_names[0]}, but you provided'
f' {list(processed_features.keys() )}' )
lowerCamelCase_ = processed_features[self.model_input_names[0]]
lowerCamelCase_ = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowercase ) == 0:
if return_attention_mask:
lowerCamelCase_ = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
lowerCamelCase_ = required_input[0]
if isinstance(lowercase , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
lowerCamelCase_ = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowercase ):
lowerCamelCase_ = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowercase ):
lowerCamelCase_ = "tf"
elif is_torch_tensor(lowercase ):
lowerCamelCase_ = "pt"
elif isinstance(lowercase , (int, float, list, tuple, np.ndarray) ):
lowerCamelCase_ = "np"
else:
raise ValueError(
f'type of {first_element} unknown: {type(lowercase )}. '
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
lowerCamelCase_ = to_numpy(lowercase )
else:
lowerCamelCase_ = [to_numpy(lowercase ) for v in value]
# Convert padding_strategy in PaddingStrategy
lowerCamelCase_ = self._get_padding_strategies(padding=lowercase , max_length=lowercase )
lowerCamelCase_ = processed_features[self.model_input_names[0]]
lowerCamelCase_ = len(lowercase )
if not all(len(lowercase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
lowerCamelCase_ = []
for i in range(lowercase ):
lowerCamelCase_ = {k: v[i] for k, v in processed_features.items()}
# truncation
lowerCamelCase_ = self._truncate(
lowercase , max_length=lowercase , pad_to_multiple_of=lowercase , truncation=lowercase , )
truncated_inputs.append(lowercase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
lowerCamelCase_ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
lowerCamelCase_ = PaddingStrategy.MAX_LENGTH
lowerCamelCase_ = {}
for i in range(lowercase ):
# padding
lowerCamelCase_ = self._pad(
truncated_inputs[i] , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , )
for key, value in outputs.items():
if key not in batch_outputs:
lowerCamelCase_ = []
if value.dtype is np.dtype(np.floataa ):
lowerCamelCase_ = value.astype(np.floataa )
batch_outputs[key].append(lowercase )
return BatchFeature(lowercase , tensor_type=lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ) -> dict:
lowerCamelCase_ = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
lowerCamelCase_ = len(lowercase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowerCamelCase_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowerCamelCase_ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
lowerCamelCase_ = np.ones(len(lowercase ) , dtype=np.intaa )
if needs_to_be_padded:
lowerCamelCase_ = max_length - len(lowercase )
if self.padding_side == "right":
if return_attention_mask:
lowerCamelCase_ = np.pad(
processed_features["attention_mask"] , (0, difference) )
lowerCamelCase_ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
lowerCamelCase_ = np.pad(
lowercase , lowercase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
lowerCamelCase_ = np.pad(
processed_features["attention_mask"] , (difference, 0) )
lowerCamelCase_ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
lowerCamelCase_ = np.pad(
lowercase , lowercase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None , lowercase = None , lowercase = None , ) -> Dict:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
lowerCamelCase_ = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowerCamelCase_ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowerCamelCase_ = len(lowercase ) > max_length
if needs_to_be_truncated:
lowerCamelCase_ = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
lowerCamelCase_ = processed_features["attention_mask"][:max_length]
return processed_features
def SCREAMING_SNAKE_CASE_( self , lowercase=False , lowercase=None ) -> Any:
# Get padding strategy
if padding is not False:
if padding is True:
lowerCamelCase_ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowercase , lowercase ):
lowerCamelCase_ = PaddingStrategy(lowercase )
elif isinstance(lowercase , lowercase ):
lowerCamelCase_ = padding
else:
lowerCamelCase_ = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
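# Hedged usage sketch (assumes the class above is the usual SequenceFeatureExtractor base;
# Wav2Vec2FeatureExtractor is one concrete subclass whose main input name is "input_values"):
#   extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   batch = extractor.pad({"input_values": [[0.1, 0.2, 0.3], [0.4]]}, padding="longest", return_tensors="np")
#   batch["input_values"].shape  # (2, 3): the shorter sequence is right-padded with 0.0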
| 19 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A ={
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 1 |
from __future__ import annotations
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = [True] * limit
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
lowerCamelCase_ = i * 2
while index < limit:
lowerCamelCase_ = False
lowerCamelCase_ = index + i
lowerCamelCase_ = [2]
for i in range(3 , lowerCamelCase__ , 2 ):
if is_prime[i]:
primes.append(lowerCamelCase__ )
return primes
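# Quick sanity check of the sieve above (a sketch, using the `prime_sieve` name that the
# caller below refers to): prime_sieve(30) should yield [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].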
def lowerCamelCase_ ( lowerCamelCase__ = 1_0_0_0_0_0_0 ):
lowerCamelCase_ = prime_sieve(lowerCamelCase__ )
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for i in range(len(lowerCamelCase__ ) ):
for j in range(i + length , len(lowerCamelCase__ ) ):
lowerCamelCase_ = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
lowerCamelCase_ = j - i
lowerCamelCase_ = sol
return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
| 19 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , ) -> Optional[int]:
lowerCamelCase_ = parent
lowerCamelCase_ = 13
lowerCamelCase_ = 7
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = 99
lowerCamelCase_ = 32
lowerCamelCase_ = 2
lowerCamelCase_ = 4
lowerCamelCase_ = 37
lowerCamelCase_ = "gelu"
lowerCamelCase_ = 0.1
lowerCamelCase_ = 0.1
lowerCamelCase_ = 512
lowerCamelCase_ = 16
lowerCamelCase_ = 2
lowerCamelCase_ = 0.0_2
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = None
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ = True
lowerCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Any:
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCamelCase_ = True
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase , encoder_hidden_states=lowercase )
# Also check the case where encoder outputs are not passed
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
lowerCamelCase_ = TFEsmForMaskedLM(config=lowercase )
lowerCamelCase_ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFEsmForTokenClassification(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.prepare_config_and_inputs()
        lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = TFEsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFEsmModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
pass
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
pass
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(lowercase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCamelCase_ = model.get_bias()
assert isinstance(lowercase , lowercase )
for k, v in name.items():
assert isinstance(lowercase , tf.Variable )
else:
lowerCamelCase_ = model.get_output_embeddings()
assert x is None
lowerCamelCase_ = model.get_bias()
assert name is None
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(lowercase )[0]
lowerCamelCase_ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , lowercase )
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(lowercase )[0]
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 19 | 1 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__A ='''▁'''
__A =get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = BertGenerationTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
super().setUp()
lowerCamelCase_ = BertGenerationTokenizer(lowercase , keep_accents=lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = "<s>"
lowerCamelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(lowercase ) , 1002 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = BertGenerationTokenizer(lowercase , keep_accents=lowercase )
lowerCamelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase ) , [285, 46, 10, 170, 382] , )
lowerCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(
lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = "Hello World!"
lowerCamelCase_ = [18536, 2260, 101]
self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
lowerCamelCase_ = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) )
@require_torch
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCamelCase_ = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCamelCase_ = " ".join(lowercase )
lowerCamelCase_ = self.big_tokenizer.encode_plus(lowercase , return_tensors="pt" , return_token_type_ids=lowercase )
lowerCamelCase_ = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=lowercase )
lowerCamelCase_ = BertGenerationConfig()
lowerCamelCase_ = BertGenerationEncoder(lowercase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase )
model(**lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
# fmt: off
lowerCamelCase_ = {"input_ids": [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 19 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( ):
# Get the sagemaker specific mp parameters from smp_options variable.
lowerCamelCase_ = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
        # Parse it and check that the field "partitions" is included; it is required for model parallelism.
lowerCamelCase_ = json.loads(lowerCamelCase__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
lowerCamelCase_ = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
        # Parse it and check the field "sagemaker_mpi_enabled".
lowerCamelCase_ = json.loads(lowerCamelCase__ )
if not mpi_options.get("sagemaker_mpi_enabled" , lowerCamelCase__ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = field(
default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , lowercase , )
@cached_property
def SCREAMING_SNAKE_CASE_( self ) -> "torch.device":
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
lowerCamelCase_ = torch.device("cpu" )
lowerCamelCase_ = 0
elif is_sagemaker_model_parallel_available():
lowerCamelCase_ = smp.local_rank()
lowerCamelCase_ = torch.device("cuda" , lowercase )
lowerCamelCase_ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
lowerCamelCase_ = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
lowerCamelCase_ = torch.device("cuda" , self.local_rank )
lowerCamelCase_ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
lowerCamelCase_ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
lowerCamelCase_ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
lowerCamelCase_ = torch.device("cuda" , self.local_rank )
lowerCamelCase_ = 1
if device.type == "cuda":
            torch.cuda.set_device(device )
return device
@property
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
return not is_sagemaker_model_parallel_available()
@property
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return False
| 19 | 1 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class _SCREAMING_SNAKE_CASE :
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[Any]:
raise NotImplementedError()
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
raise NotImplementedError()
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase = False , **lowercase ) -> Tuple:
lowerCamelCase_ = tokenizer
lowerCamelCase_ = skip_prompt
lowerCamelCase_ = decode_kwargs
# variables used in the streaming process
lowerCamelCase_ = []
lowerCamelCase_ = 0
lowerCamelCase_ = True
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Union[str, Any]:
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1" )
elif len(value.shape ) > 1:
lowerCamelCase_ = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
lowerCamelCase_ = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
lowerCamelCase_ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("\n" ):
lowerCamelCase_ = text[self.print_len :]
lowerCamelCase_ = []
lowerCamelCase_ = 0
# If the last token is a CJK character, we print the characters.
elif len(lowercase ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
lowerCamelCase_ = text[self.print_len :]
self.print_len += len(lowercase )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
lowerCamelCase_ = text[self.print_len : text.rfind(" " ) + 1]
self.print_len += len(lowercase )
self.on_finalized_text(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
lowerCamelCase_ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
lowerCamelCase_ = text[self.print_len :]
lowerCamelCase_ = []
lowerCamelCase_ = 0
else:
lowerCamelCase_ = ""
lowerCamelCase_ = True
self.on_finalized_text(lowercase , stream_end=lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = False ) -> List[str]:
print(lowercase , flush=lowercase , end="" if not stream_end else None )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> str:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4_e00 and cp <= 0x9_fff)
or (cp >= 0x3_400 and cp <= 0x4_dbf) #
or (cp >= 0x20_000 and cp <= 0x2a_6df) #
or (cp >= 0x2a_700 and cp <= 0x2b_73f) #
or (cp >= 0x2b_740 and cp <= 0x2b_81f) #
or (cp >= 0x2b_820 and cp <= 0x2c_eaf) #
or (cp >= 0xf_900 and cp <= 0xf_aff)
or (cp >= 0x2f_800 and cp <= 0x2f_a1f) #
): #
return True
return False
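# Added example for the check above (not in the original file): ord("中") == 0x4E2D
# falls inside the 0x4E00-0x9FFF block, so a trailing CJK character is flushed
# immediately, while ord("a") == 0x61 does not, so Latin text is held back until
# the next space is seen.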
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase = False , lowercase = None , **lowercase ) -> List[str]:
super().__init__(lowercase , lowercase , **lowercase )
lowerCamelCase_ = Queue()
lowerCamelCase_ = None
lowerCamelCase_ = timeout
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = False ) -> Dict:
self.text_queue.put(lowercase , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ) -> str:
return self
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
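# Minimal usage sketch (added for illustration; the "gpt2" checkpoint is just a small
# placeholder model). Generation runs in a background thread while the main thread
# consumes decoded text chunks from the iterator streamer, which is the pattern the
# class above implements.
if __name__ == "__main__":
    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tok = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
    streamer = TextIteratorStreamer(tok, skip_prompt=True)
    generation_thread = Thread(
        target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20)
    )
    generation_thread.start()
    for chunk in streamer:  # yields text pieces as soon as they are decoded
        print(chunk, end="")
    generation_thread.join()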
| 19 |
import math
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 ):
lowerCamelCase_ = end or len(lowerCamelCase__ )
for i in range(lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = i
lowerCamelCase_ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
lowerCamelCase_ = array[temp_index - 1]
temp_index -= 1
lowerCamelCase_ = temp_index_value
return array
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): # Max Heap
lowerCamelCase_ = index
lowerCamelCase_ = 2 * index + 1 # Left Node
lowerCamelCase_ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
lowerCamelCase_ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
lowerCamelCase_ = right_index
if largest != index:
lowerCamelCase_ , lowerCamelCase_ = array[largest], array[index]
heapify(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = len(lowerCamelCase__ )
for i in range(n // 2 , -1 , -1 ):
heapify(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for i in range(n - 1 , 0 , -1 ):
lowerCamelCase_ , lowerCamelCase_ = array[0], array[i]
heapify(lowerCamelCase__ , 0 , lowerCamelCase__ )
return array
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = low
lowerCamelCase_ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
lowerCamelCase_ , lowerCamelCase_ = array[j], array[i]
i += 1
def lowerCamelCase_ ( lowerCamelCase__ ):
if len(lowerCamelCase__ ) == 0:
return array
lowerCamelCase_ = 2 * math.ceil(math.loga(len(lowerCamelCase__ ) ) )
lowerCamelCase_ = 1_6
return intro_sort(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ , lowerCamelCase__ )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(lowerCamelCase__ )
max_depth -= 1
lowerCamelCase_ = median_of_a(lowerCamelCase__ , lowerCamelCase__ , start + ((end - start) // 2) + 1 , end - 1 )
lowerCamelCase_ = partition(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
intro_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = p
return insertion_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A =input('''Enter numbers separated by a comma : ''').strip()
__A =[float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 19 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__A ={
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
__A ={
'''yjernite/retribert-base-uncased''': 5_1_2,
}
__A ={
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = RetriBertTokenizer
lowerCAmelCase__ = ['input_ids', 'attention_mask']
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> List[Any]:
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowercase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowercase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowercase ) != tokenize_chinese_chars
):
lowerCamelCase_ = getattr(lowercase , normalizer_state.pop("type" ) )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = strip_accents
lowerCamelCase_ = tokenize_chinese_chars
lowerCamelCase_ = normalizer_class(**lowercase )
lowerCamelCase_ = do_lower_case
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=None ) -> int:
lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
lowerCamelCase_ = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
| 19 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , *lowercase , lowercase=None , lowercase=None , **lowercase ) -> List[str]:
super().__init__(*lowercase , **lowercase )
lowerCamelCase_ = eval_examples
lowerCamelCase_ = post_process_function
def SCREAMING_SNAKE_CASE_( self , lowercase = None , lowercase=None , lowercase = None , lowercase = "eval" , **lowercase , ) -> Dict[str, float]:
lowerCamelCase_ = gen_kwargs.copy()
lowerCamelCase_ = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
lowerCamelCase_ = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
lowerCamelCase_ = gen_kwargs
lowerCamelCase_ = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCamelCase_ = self.get_eval_dataloader(lowercase )
lowerCamelCase_ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = time.time()
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase_ = eval_loop(
lowercase , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowerCamelCase_ = self.post_process_function(lowercase , lowercase , lowercase )
lowerCamelCase_ = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase_ = metrics.pop(lowercase )
metrics.update(output.metrics )
else:
lowerCamelCase_ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCamelCase_ = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase )
return metrics
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=None , lowercase = "test" , **lowercase ) -> Union[str, Any]:
lowerCamelCase_ = gen_kwargs.copy()
lowerCamelCase_ = self.get_test_dataloader(lowercase )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = time.time()
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase_ = eval_loop(
lowercase , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCamelCase_ = self.post_process_function(lowercase , lowercase , lowercase , "predict" )
lowerCamelCase_ = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase_ = metrics.pop(lowercase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase )
| 19 | 1 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# Initialise PyTorch model
lowerCamelCase_ = TaConfig.from_json_file(lowerCamelCase__ )
print(F'Building PyTorch model from configuration: {config}' )
lowerCamelCase_ = TaForConditionalGeneration(lowerCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__A =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
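# Example invocation (added, with placeholder paths; the flags are the ones defined above):
#   python <this_script>.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/pytorch_dump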
| 19 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
__A ='''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
super().__init__()
self.register_modules(
prior=lowercase , image_encoder=lowercase , image_processor=lowercase , scheduler=lowercase , renderer=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if latents is None:
lowerCamelCase_ = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase_ = latents.to(lowercase )
lowerCamelCase_ = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE_( self , lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device(f'cuda:{gpu_id}' )
lowerCamelCase_ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
if isinstance(lowercase , lowercase ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase_ = torch.cat(lowercase , axis=0 ) if image[0].ndim == 4 else torch.stack(lowercase , axis=0 )
if not isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = self.image_processor(lowercase , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
lowerCamelCase_ = image.to(dtype=self.image_encoder.dtype , device=lowercase )
lowerCamelCase_ = self.image_encoder(lowercase )["last_hidden_state"]
lowerCamelCase_ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase_ = image_embeds.repeat_interleave(lowercase , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_ = torch.zeros_like(lowercase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowercase )
def __call__( self , lowercase , lowercase = 1 , lowercase = 25 , lowercase = None , lowercase = None , lowercase = 4.0 , lowercase = 64 , lowercase = "pil" , lowercase = True , ) -> Union[str, Any]:
if isinstance(lowercase , PIL.Image.Image ):
lowerCamelCase_ = 1
elif isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = image.shape[0]
elif isinstance(lowercase , lowercase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase_ = len(lowercase )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowercase )}' )
lowerCamelCase_ = self._execution_device
lowerCamelCase_ = batch_size * num_images_per_prompt
lowerCamelCase_ = guidance_scale > 1.0
lowerCamelCase_ = self._encode_image(lowercase , lowercase , lowercase , lowercase )
# prior
self.scheduler.set_timesteps(lowercase , device=lowercase )
lowerCamelCase_ = self.scheduler.timesteps
lowerCamelCase_ = self.prior.config.num_embeddings
lowerCamelCase_ = self.prior.config.embedding_dim
lowerCamelCase_ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowercase , lowercase , lowercase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase_ = latents.reshape(latents.shape[0] , lowercase , lowercase )
for i, t in enumerate(self.progress_bar(lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(lowercase , lowercase )
lowerCamelCase_ = self.prior(
lowercase , timestep=lowercase , proj_embedding=lowercase , ).predicted_image_embedding
# remove the variance
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase_ = self.scheduler.step(
lowercase , timestep=lowercase , sample=lowercase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowercase )
lowerCamelCase_ = []
for i, latent in enumerate(lowercase ):
print()
lowerCamelCase_ = self.renderer.decode(
latent[None, :] , lowercase , size=lowercase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(lowercase )
lowerCamelCase_ = torch.stack(lowercase )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowerCamelCase_ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase_ = [self.numpy_to_pil(lowercase ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowercase )
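# Standalone sketch of the classifier-free guidance arithmetic used in the loop above
# (added illustration with made-up numbers, not part of the pipeline): the guided
# prediction is uncond + scale * (cond - uncond), so scale > 1 pushes the result
# further toward the image-conditioned branch.
if __name__ == "__main__":
    _uncond = torch.tensor([0.10, 0.20])
    _cond = torch.tensor([0.30, 0.10])
    _scale = 3.0
    print(_uncond + _scale * (_cond - _uncond))  # tensor([ 0.7000, -0.1000])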
| 19 | 1 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = logging.get_logger()
# the current default level is logging.WARNING
lowerCamelCase_ = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = logging.get_verbosity()
lowerCamelCase_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
lowerCamelCase_ = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(lowercase ) as cl:
logger.warning(lowercase )
self.assertEqual(cl.out , msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(lowercase ) as cl:
logger.warning(lowercase )
self.assertEqual(cl.out , "" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(lowercase ) as cl:
logger.warning(lowercase )
self.assertEqual(cl.out , msg + "\n" )
# restore to the original level
logging.set_verbosity(lowercase )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def SCREAMING_SNAKE_CASE_( self ) -> str:
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
lowerCamelCase_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
lowerCamelCase_ = os.getenv("TRANSFORMERS_VERBOSITY" , lowercase )
lowerCamelCase_ = logging.log_levels[env_level_str]
lowerCamelCase_ = logging.get_verbosity()
self.assertEqual(
lowercase , lowercase , f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , )
# restore to the original level
lowerCamelCase_ = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def SCREAMING_SNAKE_CASE_( self ) -> int:
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
lowerCamelCase_ = logging.logging.getLogger()
with CaptureLogger(lowercase ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
def SCREAMING_SNAKE_CASE_( self ) -> Any:
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
lowerCamelCase_ = logging.get_logger("transformers.models.bart.tokenization_bart" )
lowerCamelCase_ = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
with CaptureLogger(lowercase ) as cl:
logger.warning_advice(lowercase )
self.assertEqual(cl.out , "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(lowercase ) as cl:
logger.warning_advice(lowercase )
self.assertEqual(cl.out , msg + "\n" )
def lowerCamelCase_ ( ):
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
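# Quick reference for the verbosity API exercised above (added note):
#   from transformers import logging
#   logging.set_verbosity_info()
#   logger = logging.get_logger("transformers")
#   logger.info("now visible")
#   logging.set_verbosity_error()  # back to errors only
# Setting TRANSFORMERS_VERBOSITY=error in the environment has the same effect at
# import time, which is what the mockenv-decorated tests above assert.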
| 19 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase_ ( ):
lowerCamelCase_ , lowerCamelCase_ = 9, 1_4 # noqa: F841
lowerCamelCase_ = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
lowerCamelCase_ = defaultdict(lowerCamelCase__ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
lowerCamelCase_ = mst(lowerCamelCase__ )
lowerCamelCase_ = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
lowerCamelCase_ = tuple(answer[:2] )
lowerCamelCase_ = tuple(edge[::-1] )
assert edge in result or reverse in result
| 19 | 1 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = PhobertTokenizer
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ = ["T@@", "i", "I", "R@@", "r", "e@@"]
lowerCamelCase_ = dict(zip(lowercase , range(len(lowercase ) ) ) )
lowerCamelCase_ = ["#version: 0.2", "l à</w>"]
lowerCamelCase_ = {"unk_token": "<unk>"}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(f'{token} {vocab_tokens[token]}\n' )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase ) )
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Optional[Any]:
lowerCamelCase_ = "Tôi là VinAI Research"
lowerCamelCase_ = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
return input_text, output_text
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase_ = "Tôi là VinAI Research"
lowerCamelCase_ = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
lowerCamelCase_ = tokenizer.tokenize(lowercase )
print(lowercase )
self.assertListEqual(lowercase , lowercase )
lowerCamelCase_ = tokens + [tokenizer.unk_token]
lowerCamelCase_ = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )
| 19 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
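# Condensed sketch of the tracking API used below (added summary, not part of the
# original example):
#   accelerator = Accelerator(log_with="all", project_dir="logs")
#   accelerator.init_trackers("run_name", config={"lr": 2e-5})
#   accelerator.log({"train_loss": 0.42}, step=10)
#   accelerator.end_training()
# The `# New Code #` markers further down show where each of these calls is wired
# into the training loop.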
__A =1_6
__A =3_2
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 1_6 ):
lowerCamelCase_ = AutoTokenizer.from_pretrained("bert-base-cased" )
lowerCamelCase_ = load_dataset("glue" , "mrpc" )
def tokenize_function(lowerCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ = datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowerCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ = 1_6
elif accelerator.mixed_precision != "no":
lowerCamelCase_ = 8
else:
lowerCamelCase_ = None
return tokenizer.pad(
lowerCamelCase__ , padding="longest" , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors="pt" , )
# Instantiate dataloaders.
lowerCamelCase_ = DataLoader(
tokenized_datasets["train"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
lowerCamelCase_ = DataLoader(
tokenized_datasets["validation"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__A =mocked_dataloaders # noqa: F811
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCamelCase__ ) == "1":
lowerCamelCase_ = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowerCamelCase_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
lowerCamelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ = config["lr"]
lowerCamelCase_ = int(config["num_epochs"] )
lowerCamelCase_ = int(config["seed"] )
lowerCamelCase_ = int(config["batch_size"] )
set_seed(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
lowerCamelCase_ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase_ = batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase_ = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ = model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ = AdamW(params=model.parameters() , lr=lowerCamelCase__ )
# Instantiate scheduler
lowerCamelCase_ = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowerCamelCase_ = os.path.split(lowerCamelCase__ )[-1].split("." )[0]
accelerator.init_trackers(lowerCamelCase__ , lowerCamelCase__ )
# Now we train the model
for epoch in range(lowerCamelCase__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowerCamelCase_ = 0
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowerCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.logits.argmax(dim=-1 )
lowerCamelCase_ , lowerCamelCase_ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowerCamelCase__ , references=lowerCamelCase__ , )
lowerCamelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(lowerCamelCase__ ),
"epoch": epoch,
} , step=lowerCamelCase__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase_ ( ):
lowerCamelCase_ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=lowerCamelCase__ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = {"lr": 2e-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
main()
| 19 | 1 |
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
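# Added worked example: for water, bulk_modulus ~ 2.15e9 Pa and density ~ 1.0e3 kg/m^3,
# so (2.15e9 / 1.0e3) ** 0.5 ~ 1466 m/s, close to the measured speed of sound in water.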
if __name__ == "__main__":
import doctest
doctest.testmod()
| 19 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__A =None
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__A ={
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
__A ={
'''facebook/mbart-large-en-ro''': 1_0_2_4,
'''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
__A =['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = MBartTokenizer
lowerCAmelCase__ = []
lowerCAmelCase__ = []
def __init__( self , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=None , lowercase=None , lowercase=None , **lowercase , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
super().__init__(
vocab_file=lowercase , tokenizer_file=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , src_lang=lowercase , tgt_lang=lowercase , additional_special_tokens=lowercase , **lowercase , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = False if not self.vocab_file else True
lowerCamelCase_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
lowerCamelCase_ = {
lang_code: self.convert_tokens_to_ids(lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase_ = src_lang if src_lang is not None else "en_XX"
lowerCamelCase_ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE_( self ) -> str:
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , **lowercase ) -> List[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase_ = src_lang
lowerCamelCase_ = self(lowercase , add_special_tokens=lowercase , return_tensors=lowercase , **lowercase )
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = "en_XX" , lowercase = None , lowercase = "ro_RO" , **lowercase , ) -> BatchEncoding:
lowerCamelCase_ = src_lang
lowerCamelCase_ = tgt_lang
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase_ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,)
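# Usage sketch (added for illustration): the language-code bookkeeping above is what
# prepares translation inputs, e.g. source sentences end with </s> followed by the
# src_lang code token.
if __name__ == "__main__":
    from transformers import MBartTokenizerFast

    tok = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
    print(batch["input_ids"][0][-2:])  # [</s> id, en_XX id]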
| 19 | 1 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
__A =['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
__A ={'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
__A =''' Hello world! cécé herlolip'''
__A =[
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase__ , lowerCamelCase__ )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = dct.pop(lowerCamelCase__ )
lowerCamelCase_ = val
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = torch.load(lowerCamelCase__ , map_location="cpu" )
lowerCamelCase_ = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval()
hub_interface.model.load_state_dict(sd["model"] )
return hub_interface
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ , lowerCamelCase_ = emb.weight.shape
lowerCamelCase_ = nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ )
lowerCamelCase_ = emb.weight.data
return lin_layer
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ):
if not os.path.exists(lowerCamelCase__ ):
lowerCamelCase_ = torch.hub.load("pytorch/fairseq" , lowerCamelCase__ ).eval()
else:
lowerCamelCase_ = load_xsum_checkpoint(lowerCamelCase__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
lowerCamelCase_ = checkpoint_path.replace("." , "-" )
lowerCamelCase_ = BartConfig.from_pretrained(lowerCamelCase__ )
lowerCamelCase_ = bart.encode(lowerCamelCase__ ).unsqueeze(0 )
lowerCamelCase_ = BartTokenizer.from_pretrained(lowerCamelCase__ ).encode(lowerCamelCase__ , return_tensors="pt" ).unsqueeze(0 )
if not torch.eq(lowerCamelCase__ , lowerCamelCase__ ).all():
raise ValueError(
F'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' )
if checkpoint_path == "bart.large.mnli":
lowerCamelCase_ = bart.state_dict()
remove_ignore_keys_(lowerCamelCase__ )
lowerCamelCase_ = state_dict["model.decoder.embed_tokens.weight"]
for src, dest in mnli_rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = BartForSequenceClassification(lowerCamelCase__ ).eval()
model.load_state_dict(lowerCamelCase__ )
lowerCamelCase_ = bart.predict("mnli" , lowerCamelCase__ , return_logits=lowerCamelCase__ )
lowerCamelCase_ = model(lowerCamelCase__ )[0] # logits
else: # no classification heads to worry about
lowerCamelCase_ = bart.model.state_dict()
remove_ignore_keys_(lowerCamelCase__ )
lowerCamelCase_ = state_dict["decoder.embed_tokens.weight"]
lowerCamelCase_ = bart.extract_features(lowerCamelCase__ )
if hf_checkpoint_name == "facebook/bart-large":
lowerCamelCase_ = BartModel(lowerCamelCase__ ).eval()
model.load_state_dict(lowerCamelCase__ )
lowerCamelCase_ = model(lowerCamelCase__ ).model[0]
else:
lowerCamelCase_ = BartForConditionalGeneration(lowerCamelCase__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(lowerCamelCase__ )
if hasattr(lowerCamelCase__ , "lm_head" ):
lowerCamelCase_ = make_linear_from_emb(model.model.shared )
lowerCamelCase_ = model.model(lowerCamelCase__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
F'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
__A =parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
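# Example invocation (added; paths and the hf_config value are placeholders, the
# positional/optional arguments are the ones defined above):
#   python <this_script>.py bart.large.cnn /tmp/bart-large-cnn --hf_config facebook/bart-large-cnn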
| 19 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__A =pytest.mark.integration
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(lowercase ) for x in np.arange(30 ).tolist()]} )
return dset
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
lowerCamelCase_ = dset.map(
lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowercase , keep_in_memory=lowercase )
lowerCamelCase_ = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(lowercase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
from elasticsearch import Elasticsearch
lowerCamelCase_ = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
lowerCamelCase_ = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=lowercase )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertRaises(lowercase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase_ = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
self.assertRaises(lowercase , index.search_batch , queries[0] )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
import faiss
lowerCamelCase_ = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase_ = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowercase ):
lowerCamelCase_ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = faiss.IndexFlat(5 )
lowerCamelCase_ = FaissIndex(custom_index=lowercase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase_ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase_ ( lowerCamelCase__ ):
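# Round-trips a FAISS index through a mocked fsspec filesystem ("mock://") using the fixture's storage_options.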
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase_ = "index.faiss"
lowerCamelCase_ = F'mock://{index_name}'
index.save(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = FaissIndex.load(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowerCamelCase__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = Elasticsearch()
lowerCamelCase_ = {"acknowledged": True}
lowerCamelCase_ = ElasticSearchIndex(es_client=lowercase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
# batched queries with timeout
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase , request_timeout=30 )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
| 19 | 1 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=4 , ) -> Optional[Any]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_attention_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_choices
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_attention_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = True
lowerCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = True
lowerCAmelCase__ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = FlaxRobertaModelTester(self )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
for model_class_name in self.all_model_classes:
lowerCamelCase_ = model_class_name.from_pretrained("roberta-base" , from_pt=lowercase )
lowerCamelCase_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowercase )
| 19 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> List[str]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
lowerCamelCase_ = self.vocab_size - 1
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , head_mask=lowercase )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = OpenAIGPTLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTDoubleHeadsModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = OpenAIGPTForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.prepare_config_and_inputs()
config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels = config_and_inputs
lowerCamelCase_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCAmelCase__ = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=False ) -> Any:
lowerCamelCase_ = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase , )
lowerCamelCase_ = inputs_dict["labels"]
lowerCamelCase_ = inputs_dict["labels"]
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase , )
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = OpenAIGPTModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , n_embd=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Any:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = OpenAIGPTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(lowercase )
lowerCamelCase_ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase ) # the president is
lowerCamelCase_ = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase_ = model.generate(lowercase , do_sample=lowercase )
self.assertListEqual(output_ids[0].tolist() , lowercase )
| 19 | 1 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def lowerCamelCase_ ( *lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__=True , lowerCamelCase__=2 ):
from .. import __version__
lowerCamelCase_ = take_from
lowerCamelCase_ = ()
if not isinstance(args[0] , lowerCamelCase__ ):
lowerCamelCase_ = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowerCamelCase__ ).base_version ) >= version.parse(lowerCamelCase__ ):
raise ValueError(
F'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
F' version {__version__} is >= {version_name}' )
lowerCamelCase_ = None
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowerCamelCase__ ),)
lowerCamelCase_ = F'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
elif hasattr(lowerCamelCase__ , lowerCamelCase__ ):
values += (getattr(lowerCamelCase__ , lowerCamelCase__ ),)
lowerCamelCase_ = F'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
elif deprecated_kwargs is None:
lowerCamelCase_ = F'`{attribute}` is deprecated and will be removed in version {version_name}.'
if warning is not None:
lowerCamelCase_ = warning + " " if standard_warn else ""
warnings.warn(warning + message , lowerCamelCase__ , stacklevel=lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0:
lowerCamelCase_ = inspect.getouterframes(inspect.currentframe() )[1]
lowerCamelCase_ = call_frame.filename
lowerCamelCase_ = call_frame.lineno
lowerCamelCase_ = call_frame.function
lowerCamelCase_ , lowerCamelCase_ = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
if len(lowerCamelCase__ ) == 0:
return
elif len(lowerCamelCase__ ) == 1:
return values[0]
return values
| 19 |
__A ={str(digit): digit**5 for digit in range(1_0)}
def lowerCamelCase_ ( lowerCamelCase__ ):
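# Sum of the fifth powers of the decimal digits, via the precomputed DIGITS_FIFTH_POWER lookup table.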
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(lowerCamelCase__ ) )
def lowerCamelCase_ ( ):
return sum(
number
for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
if number == digits_fifth_powers_sum(number ) )
if __name__ == "__main__":
print(solution())
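# For reference, the known answer to Project Euler problem 30 is 443839.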
| 19 | 1 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
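# Copy the selected teacher layers (by index) into the student's layer list via a state-dict load.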
lowerCamelCase_ = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCamelCase__ ) == len(lowerCamelCase__ ), F'{len(lowerCamelCase__ )} != {len(lowerCamelCase__ )}'
dest_layers.load_state_dict(layers_to_copy.state_dict() )
__A ={
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
__A ={
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
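# Look up which teacher layers to copy for the requested student depth; if no mapping exists, fall back to the first n_student layers.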
try:
lowerCamelCase_ = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
F' {n_student}' )
return list(range(lowerCamelCase__ ) )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
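# Decide which teacher layers supervise each student layer for intermediate-layer distillation.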
if n_student > n_teacher:
raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' )
elif n_teacher == n_student:
return list(range(lowerCamelCase__ ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = "student" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__=False , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ , ):
lowerCamelCase_ = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
AutoTokenizer.from_pretrained(lowerCamelCase__ ).save_pretrained(lowerCamelCase__ ) # purely for convenience
lowerCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ ).eval()
else:
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), F'teacher must be a model or string got type {type(lowerCamelCase__ )}'
lowerCamelCase_ = teacher.config.to_diff_dict()
try:
lowerCamelCase_ , lowerCamelCase_ = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
lowerCamelCase_ = teacher_e
if d is None:
lowerCamelCase_ = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
lowerCamelCase_ , lowerCamelCase_ = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
lowerCamelCase_ , lowerCamelCase_ = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
lowerCamelCase_ = teacher_e
if d is None:
lowerCamelCase_ = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCamelCase__ )
# Copy weights
lowerCamelCase_ = teacher.config_class(**lowerCamelCase__ )
lowerCamelCase_ = AutoModelForSeqaSeqLM.from_config(lowerCamelCase__ )
# Start by copying the full teacher state dict; this copies the first N teacher layers to the student.
lowerCamelCase_ = student.load_state_dict(teacher.state_dict() , strict=lowerCamelCase__ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher key.
if copy_first_teacher_layers: # Our copying is done. We just log and save
lowerCamelCase_ , lowerCamelCase_ = list(range(lowerCamelCase__ ) ), list(range(lowerCamelCase__ ) )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
F' {save_path}' )
student.save_pretrained(lowerCamelCase__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
lowerCamelCase_ = pick_layers_to_copy(lowerCamelCase__ , lowerCamelCase__ )
if d_layers_to_copy is None:
lowerCamelCase_ = pick_layers_to_copy(lowerCamelCase__ , lowerCamelCase__ )
try:
if hasattr(
lowerCamelCase__ , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCamelCase__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCamelCase__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCamelCase__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCamelCase__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCamelCase__ )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCamelCase__ )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
lowerCamelCase_ = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(lowerCamelCase__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 19 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase_ = 1_9_2
lowerCamelCase_ = 7_6_8
lowerCamelCase_ = 1_2
lowerCamelCase_ = 3
lowerCamelCase_ = [8_0_0, 1_3_3_3]
lowerCamelCase_ = False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = 3_3_0
lowerCamelCase_ = 1_4
lowerCamelCase_ = 6
lowerCamelCase_ = 1_3_2_0
elif "yolos_s" in yolos_name:
lowerCamelCase_ = 3_8_4
lowerCamelCase_ = 1_5_3_6
lowerCamelCase_ = 1_2
lowerCamelCase_ = 6
elif "yolos_b" in yolos_name:
lowerCamelCase_ = [8_0_0, 1_3_4_4]
lowerCamelCase_ = 9_1
lowerCamelCase_ = "huggingface/label-files"
lowerCamelCase_ = "coco-detection-id2label.json"
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowerCamelCase_ = {int(k ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase_ = in_proj_bias[: config.hidden_size]
lowerCamelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase_ = in_proj_weight[-config.hidden_size :, :]
lowerCamelCase_ = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_ ( lowerCamelCase__ ):
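# Translate original YOLOS parameter names into the Hugging Face YolosForObjectDetection naming scheme.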
if "backbone" in name:
lowerCamelCase_ = name.replace("backbone" , "vit" )
if "cls_token" in name:
lowerCamelCase_ = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowerCamelCase_ = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowerCamelCase_ = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowerCamelCase_ = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowerCamelCase_ = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCamelCase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase_ = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowerCamelCase_ = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowerCamelCase_ = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowerCamelCase_ = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
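# Rename every checkpoint key and split the fused qkv projection into separate query/key/value tensors.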
for key in orig_state_dict.copy().keys():
lowerCamelCase_ = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowerCamelCase_ = key.split("." )
lowerCamelCase_ = int(key_split[2] )
lowerCamelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase_ = val[:dim, :]
lowerCamelCase_ = val[
dim : dim * 2, :
]
lowerCamelCase_ = val[-dim:, :]
else:
lowerCamelCase_ = val[:dim]
lowerCamelCase_ = val[dim : dim * 2]
lowerCamelCase_ = val[-dim:]
else:
lowerCamelCase_ = val
return orig_state_dict
def lowerCamelCase_ ( ):
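# Fetch the standard COCO verification image (two cats on a couch) used to check the converted model.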
lowerCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase_ = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
lowerCamelCase_ = get_yolos_config(lowerCamelCase__ )
# load original state_dict
lowerCamelCase_ = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]
# load 🤗 model
lowerCamelCase_ = YolosForObjectDetection(lowerCamelCase__ )
model.eval()
lowerCamelCase_ = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase_ = 8_0_0 if yolos_name != "yolos_ti" else 5_1_2
lowerCamelCase_ = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ )
lowerCamelCase_ = image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = outputs.logits, outputs.pred_boxes
lowerCamelCase_ , lowerCamelCase_ = None, None
if yolos_name == "yolos_ti":
lowerCamelCase_ = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
lowerCamelCase_ = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase_ = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
lowerCamelCase_ = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase_ = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
lowerCamelCase_ = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
lowerCamelCase_ = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
lowerCamelCase_ = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
lowerCamelCase_ = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
lowerCamelCase_ = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCamelCase_ = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" )
model.push_to_hub(lowerCamelCase__ , organization="hustvl" )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__A =parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 19 | 1 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
__A =logging.getLogger(__name__)
def lowerCamelCase_ ( lowerCamelCase__ ):
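# Record the repository path, commit sha and active branch to git_log.json for reproducibility.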
lowerCamelCase_ = git.Repo(search_parent_directories=lowerCamelCase__ )
lowerCamelCase_ = {
"repo_id": str(lowerCamelCase__ ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
}
with open(os.path.join(lowerCamelCase__ , "git_log.json" ) , "w" ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=4 )
def lowerCamelCase_ ( lowerCamelCase__ ):
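# Set up single-GPU or multi-GPU / multi-node distributed training from environment variables (WORLD_SIZE, RANK, ...).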
if params.n_gpu <= 0:
lowerCamelCase_ = 0
lowerCamelCase_ = -1
lowerCamelCase_ = True
lowerCamelCase_ = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
lowerCamelCase_ = int(os.environ["WORLD_SIZE"] )
lowerCamelCase_ = int(os.environ["N_GPU_NODE"] )
lowerCamelCase_ = int(os.environ["RANK"] )
# number of nodes / node ID
lowerCamelCase_ = params.world_size // params.n_gpu_per_node
lowerCamelCase_ = params.global_rank // params.n_gpu_per_node
lowerCamelCase_ = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowerCamelCase_ = 1
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 1
lowerCamelCase_ = 1
lowerCamelCase_ = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowerCamelCase_ = params.node_id == 0 and params.local_rank == 0
lowerCamelCase_ = params.n_nodes > 1
# summary
lowerCamelCase_ = F'--- Global rank: {params.global_rank} - '
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" , backend="nccl" , )
def lowerCamelCase_ ( lowerCamelCase__ ):
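# Seed numpy, torch and (if GPUs are used) all CUDA devices for reproducible runs.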
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 19 |
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
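# Compute the binomial coefficient C(n, r) by building a single Pascal's-triangle row in place (O(r) extra memory).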
lowerCamelCase_ = [0 for i in range(r + 1 )]
# nc0 = 1
lowerCamelCase_ = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
lowerCamelCase_ = min(i , r )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=1_0, r=5))
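# C(10, 5) = 252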
| 19 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__A ={'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''DeiTFeatureExtractor''']
__A =['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 |
import math
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
if 0 not in (x, y):
# We use the relation log10(x^y) = y*log10(x), so comparing y*log10(x) compares the powers.
return y * math.logaa(lowerCamelCase__ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
__A ='''Enter the base and the power separated by a comma: '''
__A, __A =map(int, input(prompt).split(''','''))
__A, __A =map(int, input(prompt).split(''','''))
# We find the log of each number, using the function res(), which takes two
# arguments.
__A =res(xa, ya)
__A =res(xa, ya)
# We check for the largest number
if resa > resa:
print('''Largest number is''', xa, '''^''', ya)
elif resa > resa:
print('''Largest number is''', xa, '''^''', ya)
else:
print('''Both are equal''')
| 19 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def lowerCamelCase_ ( lowerCamelCase__=None ):
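# Build the parser for "accelerate test", either standalone or as a subcommand of an existing parser.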
if subparsers is not None:
lowerCamelCase_ = subparsers.add_parser("test" )
else:
lowerCamelCase_ = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=lowerCamelCase__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCamelCase__ )
return parser
def lowerCamelCase_ ( lowerCamelCase__ ):
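# Run the bundled test_script.py through accelerate-launch, passing --config_file when one was provided.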
lowerCamelCase_ = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
lowerCamelCase_ = script_name
else:
lowerCamelCase_ = F'--config_file={args.config_file} {script_name}'
lowerCamelCase_ = ["accelerate-launch"] + test_args.split()
lowerCamelCase_ = execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def lowerCamelCase_ ( ):
lowerCamelCase_ = test_command_parser()
lowerCamelCase_ = parser.parse_args()
test_command(lowerCamelCase__ )
if __name__ == "__main__":
main()
| 19 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
__A =logging.get_logger(__name__)
__A =list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__A =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Model type selected in the list: ' + ', '.join(snake_case_ )} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase__ = field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase__ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase__ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase__ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'train'
lowerCAmelCase__ = 'dev'
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , lowercase , lowercase , lowercase = None , lowercase = Split.train , lowercase = False , lowercase = None , lowercase = "pt" , ) -> List[str]:
lowerCamelCase_ = args
lowerCamelCase_ = is_language_sensitive
lowerCamelCase_ = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(lowercase , lowercase ):
try:
lowerCamelCase_ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowerCamelCase_ = mode
# Load data features from cache or dataset file
lowerCamelCase_ = "v2" if args.version_2_with_negative else "v1"
lowerCamelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ = cached_features_file + ".lock"
with FileLock(lowercase ):
if os.path.exists(lowercase ) and not args.overwrite_cache:
lowerCamelCase_ = time.time()
lowerCamelCase_ = torch.load(lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ = self.old_features["features"]
lowerCamelCase_ = self.old_features.get("dataset" , lowercase )
lowerCamelCase_ = self.old_features.get("examples" , lowercase )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
lowerCamelCase_ = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ = self.processor.get_train_examples(args.data_dir )
lowerCamelCase_ , lowerCamelCase_ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=lowercase , )
lowerCamelCase_ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self , lowercase ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
lowerCamelCase_ = self.features[i]
lowerCamelCase_ = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase_ = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ = torch.tensor(feature.start_position , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 19 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = TextToVideoSDPipeline
lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowerCAmelCase__ = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
lowerCamelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase_ = CLIPTextModel(lowercase )
lowerCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=0 ) -> Optional[int]:
if str(lowercase ).startswith("mps" ):
lowerCamelCase_ = torch.manual_seed(lowercase )
else:
lowerCamelCase_ = torch.Generator(device=lowercase ).manual_seed(lowercase )
lowerCamelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = TextToVideoSDPipeline(**lowercase )
lowerCamelCase_ = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = "np"
lowerCamelCase_ = sd_pipe(**lowercase ).frames
lowerCamelCase_ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
lowerCamelCase_ = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase , expected_max_diff=1e-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def SCREAMING_SNAKE_CASE_( self ) -> str:
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE_( self ) -> Any:
return super().test_progress_bar()
@slow
@skip_mps
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
lowerCamelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
lowerCamelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCamelCase_ = pipe.to("cuda" )
lowerCamelCase_ = "Spiderman is surfing"
lowerCamelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase_ = pipe(lowercase , generator=lowercase , num_inference_steps=25 , output_type="pt" ).frames
lowerCamelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
lowerCamelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
lowerCamelCase_ = pipe.to("cuda" )
lowerCamelCase_ = "Spiderman is surfing"
lowerCamelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase_ = pipe(lowercase , generator=lowercase , num_inference_steps=2 , output_type="pt" ).frames
lowerCamelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 19 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE_( lowercase ) -> int:
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
raise NotImplementedError()
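# A minimal sketch of a concrete command built on this abstract base class. The names
# (BaseCommand, EnvCommand, register_subcommand, run) are hypothetical; the two abstract
# methods above presumably act as the parser-registration hook and the execution entry point.
#
#     class EnvCommand(BaseCommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser) -> None:
#             parser.add_argument("--verbose", action="store_true")
#
#         def run(self) -> None:
#             print("collecting environment info ...")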
| 19 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = StableDiffusionSAGPipeline
lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowerCamelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCamelCase_ = CLIPTextModel(lowercase )
lowerCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=0 ) -> List[str]:
if str(lowercase ).startswith("mps" ):
lowerCamelCase_ = torch.manual_seed(lowercase )
else:
lowerCamelCase_ = torch.Generator(device=lowercase ).manual_seed(lowercase )
lowerCamelCase_ = {
"prompt": ".",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 1.0,
"sag_scale": 1.0,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
lowerCamelCase_ = sag_pipe.to(lowercase )
sag_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = "."
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = sag_pipe(
[prompt] , generator=lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowerCamelCase_ = sag_pipe.to(lowercase )
sag_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = "."
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = sag_pipe(
[prompt] , generator=lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowerCamelCase_ = sag_pipe.to(lowercase )
sag_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = "."
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = sag_pipe(
[prompt] , width=768 , height=512 , generator=lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , )
lowerCamelCase_ = output.images
assert image.shape == (1, 512, 768, 3)
| 19 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[Any]:
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = 1
lowerCamelCase_ = FrozenDict(lowercase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = True
lowerCamelCase_ = FrozenDict(lowercase )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=lowercase , segmentation_processor=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
self.enable_attention_slicing(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , lowercase , lowercase , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> int:
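# Segment the region described by `text` with the CLIPSeg model, turn the sigmoid of the
# logits into a PIL mask resized to the input image, then delegate prompt + image + mask
# to a StableDiffusionInpaintPipeline built from this pipeline's own components.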
lowerCamelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowerCamelCase_ = self.segmentation_model(**lowercase )
lowerCamelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowerCamelCase_ = self.numpy_to_pil(lowercase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowerCamelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowercase , image=lowercase , mask_image=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , )
| 19 | 1 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _SCREAMING_SNAKE_CASE :
@staticmethod
def SCREAMING_SNAKE_CASE_( *lowercase , **lowercase ) -> int:
pass
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = hashlib.md5(image.tobytes() )  # md5 of the raw bytes, used as a stable image fingerprint
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCAmelCase__ = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase ) -> str:
lowerCamelCase_ = DepthEstimationPipeline(model=lowercase , image_processor=lowercase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> Optional[Any]:
lowerCamelCase_ = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , lowercase )
import datasets
lowerCamelCase_ = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
lowerCamelCase_ = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , lowercase , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = "Intel/dpt-large"
lowerCamelCase_ = pipeline("depth-estimation" , model=lowercase )
lowerCamelCase_ = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
lowerCamelCase_ = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 2_9.3_0_4 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.6_6_2 )
@require_torch
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 19 |
from collections import deque
def lowerCamelCase_ ( lowerCamelCase__ ):
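# Tarjan's strongly-connected-components algorithm: a single DFS pass that tracks
# discovery indices and low-link values on an explicit stack; runs in O(V + E) and
# returns the list of components.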
lowerCamelCase_ = len(lowerCamelCase__ )
lowerCamelCase_ = deque()
lowerCamelCase_ = [False for _ in range(lowerCamelCase__ )]
lowerCamelCase_ = [-1 for _ in range(lowerCamelCase__ )]
lowerCamelCase_ = index_of[:]
def strong_connect(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = index # the number when this node is seen
lowerCamelCase_ = index # lowest rank node reachable from here
index += 1
stack.append(lowerCamelCase__ )
lowerCamelCase_ = True
for w in g[v]:
if index_of[w] == -1:
lowerCamelCase_ = strong_connect(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
lowerCamelCase_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
lowerCamelCase_ = []
lowerCamelCase_ = stack.pop()
lowerCamelCase_ = False
component.append(lowerCamelCase__ )
while w != v:
lowerCamelCase_ = stack.pop()
lowerCamelCase_ = False
component.append(lowerCamelCase__ )
components.append(lowerCamelCase__ )
return index
lowerCamelCase_ = []
for v in range(lowerCamelCase__ ):
if index_of[v] == -1:
strong_connect(lowerCamelCase__ , 0 , lowerCamelCase__ )
return components
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = [[] for _ in range(lowerCamelCase__ )]
for u, v in edges:
g[u].append(lowerCamelCase__ )
return g
if __name__ == "__main__":
# Test
__A =7
__A =[0, 0, 1, 2, 3, 3, 4, 4, 6]
__A =[1, 3, 2, 0, 1, 4, 5, 6, 5]
__A =[(u, v) for u, v in zip(source, target)]
__A =create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 19 | 1 |
import argparse
import struct
import unittest
class _SCREAMING_SNAKE_CASE :
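# Pure-Python SHA-256. The initial hash values and round constants below are the
# standard SHA-256 parameters (FIPS 180-4); the digest is computed in final_hash()
# and exposed as a hex string on `self.hash`.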
def __init__( self , lowercase ) -> None:
lowerCamelCase_ = data
# Initialize hash values
lowerCamelCase_ = [
0x6a_09e_667,
0xbb_67a_e85,
0x3c_6ef_372,
0xa5_4ff_53a,
0x51_0e5_27f,
0x9b_056_88c,
0x1f_83d_9ab,
0x5b_e0c_d19,
]
# Initialize round constants
lowerCamelCase_ = [
0x42_8a2_f98,
0x71_374_491,
0xb5_c0f_bcf,
0xe9_b5d_ba5,
0x39_56c_25b,
0x59_f11_1f1,
0x92_3f8_2a4,
0xab_1c5_ed5,
0xd8_07a_a98,
0x12_835_b01,
0x24_318_5be,
0x55_0c7_dc3,
0x72_be5_d74,
0x80_deb_1fe,
0x9b_dc0_6a7,
0xc1_9bf_174,
0xe4_9b6_9c1,
0xef_be4_786,
0x0f_c19_dc6,
0x24_0ca_1cc,
0x2d_e92_c6f,
0x4a_748_4aa,
0x5c_b0a_9dc,
0x76_f98_8da,
0x98_3e5_152,
0xa8_31c_66d,
0xb0_032_7c8,
0xbf_597_fc7,
0xc6_e00_bf3,
0xd5_a79_147,
0x06_ca6_351,
0x14_292_967,
0x27_b70_a85,
0x2e_1b2_138,
0x4d_2c6_dfc,
0x53_380_d13,
0x65_0a7_354,
0x76_6a0_abb,
0x81_c2c_92e,
0x92_722_c85,
0xa2_bfe_8a1,
0xa8_1a6_64b,
0xc2_4b8_b70,
0xc7_6c5_1a3,
0xd1_92e_819,
0xd6_990_624,
0xf4_0e3_585,
0x10_6aa_070,
0x19_a4c_116,
0x1e_376_c08,
0x27_487_74c,
0x34_b0b_cb5,
0x39_1c0_cb3,
0x4e_d8a_a4a,
0x5b_9cc_a4f,
0x68_2e6_ff3,
0x74_8f8_2ee,
0x78_a56_36f,
0x84_c87_814,
0x8c_c70_208,
0x90_bef_ffa,
0xa4_506_ceb,
0xbe_f9a_3f7,
0xc6_717_8f2,
]
lowerCamelCase_ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def SCREAMING_SNAKE_CASE_( lowercase ) -> bytes:
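# SHA-256 message padding: append a single 0x80 byte, zero-pad so the length is
# congruent to 56 mod 64, then append the original length in bits as a 64-bit
# big-endian integer.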
lowerCamelCase_ = b"\x80" + (b"\x00" * (63 - (len(lowercase ) + 8) % 64))
lowerCamelCase_ = struct.pack(">Q" , (len(lowercase ) * 8) )
return data + padding + big_endian_integer
def SCREAMING_SNAKE_CASE_( self ) -> None:
# Convert into blocks of 64 bytes
lowerCamelCase_ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
lowerCamelCase_ = list(struct.unpack(">16L" , lowercase ) )
# add 48 0-ed integers
words += [0] * 48
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
lowerCamelCase_ = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
lowerCamelCase_ = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
lowerCamelCase_ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x100_000_000
# Compression
lowerCamelCase_ = self.ror(lowercase , 6 ) ^ self.ror(lowercase , 11 ) ^ self.ror(lowercase , 25 )
lowerCamelCase_ = (e & f) ^ ((~e & 0xff_fff_fff) & g)
lowerCamelCase_ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x100_000_000
lowerCamelCase_ = self.ror(lowercase , 2 ) ^ self.ror(lowercase , 13 ) ^ self.ror(lowercase , 22 )
lowerCamelCase_ = (a & b) ^ (a & c) ^ (b & c)
lowerCamelCase_ = (sa + maj) % 0x100_000_000
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = (
g,
f,
e,
((d + tempa) % 0x100_000_000),
c,
b,
a,
((tempa + tempa) % 0x100_000_000),
)
lowerCamelCase_ = [a, b, c, d, e, f, g, h]
# Modify final values
lowerCamelCase_ = [
((element + mutated_hash_values[index]) % 0x100_000_000)
for index, element in enumerate(self.hashes )
]
lowerCamelCase_ = "".join([hex(lowercase )[2:].zfill(8 ) for value in self.hashes] )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> int:
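# Circular right rotation of a 32-bit value by `rotations` bits.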
return 0xff_fff_fff & (value << (32 - rotations)) | (value >> rotations)
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> None:
import hashlib
lowerCamelCase_ = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(lowercase ).hash , hashlib.sha256(lowercase ).hexdigest() )
def lowerCamelCase_ ( ):
import doctest
doctest.testmod()
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument(
"-f" , "--file" , dest="input_file" , help="Hash contents of a file" )
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
lowerCamelCase_ = f.read()
else:
lowerCamelCase_ = bytes(lowerCamelCase__ , "utf-8" )
print(SHAaaa(lowerCamelCase__ ).hash )
if __name__ == "__main__":
main()
| 19 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A ={'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 19 | 1 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
lowerCamelCase_ = DatasetInfosDict.from_directory(lowerCamelCase__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=4_2 , ),
] , )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = str(lowerCamelCase__ )
dataset_info.write_to_directory(lowerCamelCase__ )
lowerCamelCase_ = DatasetInfo.from_directory(lowerCamelCase__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(lowerCamelCase__ , "dataset_info.json" ) )
def lowerCamelCase_ ( ):
lowerCamelCase_ = DatasetInfo(
description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
lowerCamelCase_ = dataset_info._to_yaml_dict()
assert sorted(lowerCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
lowerCamelCase_ = yaml.safe_dump(lowerCamelCase__ )
lowerCamelCase_ = yaml.safe_load(lowerCamelCase__ )
assert dataset_info_yaml_dict == reloaded
def lowerCamelCase_ ( ):
lowerCamelCase_ = DatasetInfo()
lowerCamelCase_ = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=4_2 ),
"v2": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = str(lowerCamelCase__ )
dataset_infos_dict.write_to_directory(lowerCamelCase__ )
lowerCamelCase_ = DatasetInfosDict.from_directory(lowerCamelCase__ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowerCamelCase_ = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowerCamelCase_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(lowerCamelCase__ , "README.md" ) )
| 19 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A ={'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A =logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = ['pixel_values']
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BILINEAR , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , **lowercase , ) -> None:
super().__init__(**lowercase )
lowerCamelCase_ = size if size is not None else {"shortest_edge": 256}
lowerCamelCase_ = get_size_dict(lowercase , default_to_square=lowercase )
lowerCamelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
lowerCamelCase_ = get_size_dict(lowercase )
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = resample
lowerCamelCase_ = do_center_crop
lowerCamelCase_ = crop_size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> np.ndarray:
lowerCamelCase_ = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
lowerCamelCase_ = get_resize_output_image_size(lowercase , size=size["shortest_edge"] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
lowerCamelCase_ = get_size_dict(lowercase )
return center_crop(lowercase , size=(size["height"], size["width"]) , data_format=lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase = None , **lowercase ) -> np.ndarray:
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> Optional[Any]:
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = size if size is not None else self.size
lowerCamelCase_ = get_size_dict(lowercase , default_to_square=lowercase )
lowerCamelCase_ = resample if resample is not None else self.resample
lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ = get_size_dict(lowercase )
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ = image_std if image_std is not None else self.image_std
lowerCamelCase_ = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(lowercase ) for image in images]
if do_resize:
lowerCamelCase_ = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
lowerCamelCase_ = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
lowerCamelCase_ = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
lowerCamelCase_ = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
lowerCamelCase_ = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
lowerCamelCase_ = {"pixel_values": images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
| 19 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__A ='''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__A =concatenate_datasets
__A =DownloadConfig
__A =DownloadManager
__A =DownloadMode
__A =DownloadConfig
__A =DownloadMode
__A =DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 19 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''vocab.json'''}
__A ={
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
__A ={'''mgp-str''': 2_7}
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowercase , lowercase="[GO]" , lowercase="[GO]" , lowercase="[s]" , lowercase="[GO]" , **lowercase ) -> List[str]:
super().__init__(
unk_token=lowercase , bos_token=lowercase , eos_token=lowercase , pad_token=lowercase , **lowercase , )
with open(lowercase , encoding="utf-8" ) as vocab_handle:
lowerCamelCase_ = json.load(lowercase )
lowerCamelCase_ = {v: k for k, v in self.vocab.items()}
@property
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
return len(self.vocab )
def SCREAMING_SNAKE_CASE_( self ) -> str:
return dict(self.vocab , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Optional[Any]:
lowerCamelCase_ = []
for s in text:
char_tokens.extend(lowercase )
return char_tokens
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Union[str, Any]:
return self.vocab.get(lowercase , self.vocab.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Tuple:
return self.decoder.get(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not os.path.isdir(lowercase ):
logger.error("Vocabulary path ({}) should be a directory".format(lowercase ) )
return
lowerCamelCase_ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowercase , ensure_ascii=lowercase ) + "\n" )
return (vocab_file,)
| 19 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A ={
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 1 |
from collections import deque
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = len(lowerCamelCase__ )
lowerCamelCase_ = deque()
lowerCamelCase_ = [False for _ in range(lowerCamelCase__ )]
lowerCamelCase_ = [-1 for _ in range(lowerCamelCase__ )]
lowerCamelCase_ = index_of[:]
def strong_connect(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = index # the number when this node is seen
lowerCamelCase_ = index # lowest rank node reachable from here
index += 1
stack.append(lowerCamelCase__ )
lowerCamelCase_ = True
for w in g[v]:
if index_of[w] == -1:
lowerCamelCase_ = strong_connect(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
lowerCamelCase_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
lowerCamelCase_ = []
lowerCamelCase_ = stack.pop()
lowerCamelCase_ = False
component.append(lowerCamelCase__ )
while w != v:
lowerCamelCase_ = stack.pop()
lowerCamelCase_ = False
component.append(lowerCamelCase__ )
components.append(lowerCamelCase__ )
return index
lowerCamelCase_ = []
for v in range(lowerCamelCase__ ):
if index_of[v] == -1:
strong_connect(lowerCamelCase__ , 0 , lowerCamelCase__ )
return components
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = [[] for _ in range(lowerCamelCase__ )]
for u, v in edges:
g[u].append(lowerCamelCase__ )
return g
if __name__ == "__main__":
# Test
__A =7
__A =[0, 0, 1, 2, 3, 3, 4, 4, 6]
__A =[1, 3, 2, 0, 1, 4, 5, 6, 5]
__A =[(u, v) for u, v in zip(source, target)]
__A =create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 19 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , ) -> Optional[int]:
lowerCamelCase_ = parent
lowerCamelCase_ = 13
lowerCamelCase_ = 7
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = 99
lowerCamelCase_ = 32
lowerCamelCase_ = 2
lowerCamelCase_ = 4
lowerCamelCase_ = 37
lowerCamelCase_ = "gelu"
lowerCamelCase_ = 0.1
lowerCamelCase_ = 0.1
lowerCamelCase_ = 512
lowerCamelCase_ = 16
lowerCamelCase_ = 2
lowerCamelCase_ = 0.0_2
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = None
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = self.prepare_config_and_inputs()
lowerCamelCase_ = True
lowerCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Any:
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCamelCase_ = True
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase , encoder_hidden_states=lowercase )
# Also check the case where encoder outputs are not passed
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
lowerCamelCase_ = TFEsmForMaskedLM(config=lowercase )
lowerCamelCase_ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFEsmForTokenClassification(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = config_and_inputs
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = TFEsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFEsmModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
pass
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
pass
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(lowercase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCamelCase_ = model.get_bias()
assert isinstance(lowercase , lowercase )
for k, v in name.items():
assert isinstance(lowercase , tf.Variable )
else:
lowerCamelCase_ = model.get_output_embeddings()
assert x is None
lowerCamelCase_ = model.get_bias()
assert name is None
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(lowercase )[0]
lowerCamelCase_ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , lowercase )
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(lowercase )[0]
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 19 | 1 |
from collections.abc import Callable
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
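# Bisection root finding: assumes `function` is continuous on [a, b] and changes sign
# over the interval; the interval is halved until the midpoint lies within 1e-7 of the
# bracket endpoint.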
lowerCamelCase_ = a
lowerCamelCase_ = b
if function(lowerCamelCase__ ) == 0: # one of the a or b is a root for the function
return a
elif function(lowerCamelCase__ ) == 0:
return b
elif (
function(lowerCamelCase__ ) * function(lowerCamelCase__ ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("could not find root in given interval." )
else:
lowerCamelCase_ = start + (end - start) / 2.0
while abs(start - mid ) > 1_0**-7: # until precisely equals to 10^-7
if function(lowerCamelCase__ ) == 0:
return mid
elif function(lowerCamelCase__ ) * function(lowerCamelCase__ ) < 0:
lowerCamelCase_ = mid
else:
lowerCamelCase_ = mid
lowerCamelCase_ = start + (end - start) / 2.0
return mid
def lowerCamelCase_ ( lowerCamelCase__ ):
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
| 19 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( ):
# Get the sagemaker specific mp parameters from smp_options variable.
lowerCamelCase_ = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
lowerCamelCase_ = json.loads(lowerCamelCase__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
lowerCamelCase_ = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
lowerCamelCase_ = json.loads(lowerCamelCase__ )
if not mpi_options.get("sagemaker_mpi_enabled" , lowerCamelCase__ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = field(
default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , lowercase , )
@cached_property
def SCREAMING_SNAKE_CASE_( self ) -> "torch.device":
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
lowerCamelCase_ = torch.device("cpu" )
lowerCamelCase_ = 0
elif is_sagemaker_model_parallel_available():
lowerCamelCase_ = smp.local_rank()
lowerCamelCase_ = torch.device("cuda" , lowercase )
lowerCamelCase_ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
lowerCamelCase_ = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
lowerCamelCase_ = torch.device("cuda" , self.local_rank )
lowerCamelCase_ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
lowerCamelCase_ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
lowerCamelCase_ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
lowerCamelCase_ = torch.device("cuda" , self.local_rank )
lowerCamelCase_ = 1
if device.type == "cuda":
torch.cuda.set_device(lowercase )
return device
@property
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
return not is_sagemaker_model_parallel_available()
@property
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return False
| 19 | 1 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'MCTCTFeatureExtractor'
lowerCAmelCase__ = 'AutoTokenizer'
def __init__( self , lowercase , lowercase ) -> str:
super().__init__(lowercase , lowercase )
lowerCamelCase_ = self.feature_extractor
lowerCamelCase_ = False
def __call__( self , *lowercase , **lowercase ) -> Dict:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowercase , **lowercase )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
lowerCamelCase_ = kwargs.pop("raw_speech" )
else:
lowerCamelCase_ = kwargs.pop("audio" , lowercase )
lowerCamelCase_ = kwargs.pop("sampling_rate" , lowercase )
lowerCamelCase_ = kwargs.pop("text" , lowercase )
if len(lowercase ) > 0:
lowerCamelCase_ = args[0]
lowerCamelCase_ = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
lowerCamelCase_ = self.feature_extractor(lowercase , *lowercase , sampling_rate=lowercase , **lowercase )
if text is not None:
lowerCamelCase_ = self.tokenizer(lowercase , **lowercase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase_ = encodings["input_ids"]
return inputs
def SCREAMING_SNAKE_CASE_( self , *lowercase , **lowercase ) -> List[Any]:
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , *lowercase , **lowercase ) -> List[str]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*lowercase , **lowercase )
lowerCamelCase_ = kwargs.pop("input_features" , lowercase )
lowerCamelCase_ = kwargs.pop("labels" , lowercase )
if len(lowercase ) > 0:
lowerCamelCase_ = args[0]
lowerCamelCase_ = args[1:]
if input_features is not None:
lowerCamelCase_ = self.feature_extractor.pad(lowercase , *lowercase , **lowercase )
if labels is not None:
lowerCamelCase_ = self.tokenizer.pad(lowercase , **lowercase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowerCamelCase_ = labels["input_ids"]
return input_features
def SCREAMING_SNAKE_CASE_( self , *lowercase , **lowercase ) -> int:
return self.tokenizer.decode(*lowercase , **lowercase )
@contextmanager
def SCREAMING_SNAKE_CASE_( self ) -> str:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer
yield
lowerCamelCase_ = self.feature_extractor
lowerCamelCase_ = False
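# Illustrative usage sketch (checkpoint name and variables below are assumptions, not part of
# this file): the audio and its transcription can be passed in a single call instead of going
# through the deprecated `as_target_processor` context manager.
#
#   processor = _SCREAMING_SNAKE_CASE.from_pretrained("speechbrain/m-ctc-t-large")
#   batch = processor(audio=waveform, sampling_rate=16_000, text="hello world")
#   # `batch` holds the feature extractor's outputs plus a "labels" entry from the tokenizer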
| 19 |
import math
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 ):
lowerCamelCase_ = end or len(lowerCamelCase__ )
for i in range(lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = i
lowerCamelCase_ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
lowerCamelCase_ = array[temp_index - 1]
temp_index -= 1
lowerCamelCase_ = temp_index_value
return array
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): # Max Heap
lowerCamelCase_ = index
lowerCamelCase_ = 2 * index + 1 # Left Node
lowerCamelCase_ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
lowerCamelCase_ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
lowerCamelCase_ = right_index
if largest != index:
lowerCamelCase_ , lowerCamelCase_ = array[largest], array[index]
heapify(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = len(lowerCamelCase__ )
for i in range(n // 2 , -1 , -1 ):
heapify(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for i in range(n - 1 , 0 , -1 ):
lowerCamelCase_ , lowerCamelCase_ = array[0], array[i]
heapify(lowerCamelCase__ , 0 , lowerCamelCase__ )
return array
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = low
lowerCamelCase_ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
lowerCamelCase_ , lowerCamelCase_ = array[j], array[i]
i += 1
def lowerCamelCase_ ( lowerCamelCase__ ):
if len(lowerCamelCase__ ) == 0:
return array
lowerCamelCase_ = 2 * math.ceil(math.loga(len(lowerCamelCase__ ) ) )
lowerCamelCase_ = 1_6
return intro_sort(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ , lowerCamelCase__ )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(lowerCamelCase__ )
max_depth -= 1
lowerCamelCase_ = median_of_a(lowerCamelCase__ , lowerCamelCase__ , start + ((end - start) // 2) + 1 , end - 1 )
lowerCamelCase_ = partition(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
intro_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = p
return insertion_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
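# Minimal illustrative check (values assumed): `sort` is the public entry point; it falls back to
# heap sort once the recursion-depth budget is exhausted and to insertion sort for short runs.
#
#   sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
#   # -> [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]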
if __name__ == "__main__":
import doctest
doctest.testmod()
__A =input('''Enter numbers separated by a comma : ''').strip()
__A =[float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 19 | 1 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__A =logging.getLogger(__name__)
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'token-classification'
def __init__( self , lowercase ) -> Optional[Any]:
if type(lowercase ) == dict:
lowerCamelCase_ = Namespace(**lowercase )
lowerCamelCase_ = import_module("tasks" )
try:
lowerCamelCase_ = getattr(lowercase , hparams.task_type )
lowerCamelCase_ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
lowerCamelCase_ = self.token_classification_task.get_labels(hparams.labels )
lowerCamelCase_ = CrossEntropyLoss().ignore_index
super().__init__(lowercase , len(self.labels ) , self.mode )
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> List[str]:
return self.model(**lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> str:
lowerCamelCase_ = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowerCamelCase_ = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don't use token_type_ids
lowerCamelCase_ = self(**lowercase )
lowerCamelCase_ = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = self.hparams
for mode in ["train", "dev", "test"]:
lowerCamelCase_ = self._feature_file(lowercase )
if os.path.exists(lowercase ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , lowercase )
lowerCamelCase_ = torch.load(lowercase )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
lowerCamelCase_ = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase )
lowerCamelCase_ = self.token_classification_task.convert_examples_to_features(
lowercase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , lowercase )
torch.save(lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase = False ) -> DataLoader:
lowerCamelCase_ = self._feature_file(lowercase )
logger.info("Loading features from cached file %s" , lowercase )
lowerCamelCase_ = torch.load(lowercase )
lowerCamelCase_ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCamelCase_ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowerCamelCase_ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowerCamelCase_ = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowerCamelCase_ = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(lowercase , lowercase , lowercase , lowercase ) , batch_size=lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> List[Any]:
"""Compute validation""" ""
lowerCamelCase_ = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowerCamelCase_ = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don't use token_type_ids
lowerCamelCase_ = self(**lowercase )
lowerCamelCase_ , lowerCamelCase_ = outputs[:2]
lowerCamelCase_ = logits.detach().cpu().numpy()
lowerCamelCase_ = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]:
lowerCamelCase_ = torch.stack([x["val_loss"] for x in outputs] ).mean()
lowerCamelCase_ = np.concatenate([x["pred"] for x in outputs] , axis=0 )
lowerCamelCase_ = np.argmax(lowercase , axis=2 )
lowerCamelCase_ = np.concatenate([x["target"] for x in outputs] , axis=0 )
lowerCamelCase_ = dict(enumerate(self.labels ) )
lowerCamelCase_ = [[] for _ in range(out_label_ids.shape[0] )]
lowerCamelCase_ = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowerCamelCase_ = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(lowercase , lowercase ),
"precision": precision_score(lowercase , lowercase ),
"recall": recall_score(lowercase , lowercase ),
"f1": fa_score(lowercase , lowercase ),
}
lowerCamelCase_ = dict(results.items() )
lowerCamelCase_ = results
return ret, preds_list, out_label_list
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Dict:
# when stable
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._eval_end(lowercase )
lowerCamelCase_ = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[Any]:
# updating to test_epoch_end instead of deprecated test_end
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = self._eval_end(lowercase )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowerCamelCase_ = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def SCREAMING_SNAKE_CASE_( lowercase , lowercase ) -> Any:
# Add NER specific options
BaseTransformer.add_model_specific_args(lowercase , lowercase )
parser.add_argument(
"--task_type" , default="NER" , type=lowercase , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=128 , type=lowercase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=lowercase , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=lowercase , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
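# Hypothetical command-line sketch (script name assumed): the flags below are the ones this file
# registers; the generic ones such as --data_dir and --output_dir come from `lightning_base`.
#
#   python run_pl_ner_sketch.py --task_type NER --max_seq_length 128 --labels labels.txt --gpus 1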
if __name__ == "__main__":
__A =argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__A =NERTransformer.add_model_specific_args(parser, os.getcwd())
__A =parser.parse_args()
__A =NERTransformer(args)
__A =generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__A =sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
__A =model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 19 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , *lowercase , lowercase=None , lowercase=None , **lowercase ) -> List[str]:
super().__init__(*lowercase , **lowercase )
lowerCamelCase_ = eval_examples
lowerCamelCase_ = post_process_function
def SCREAMING_SNAKE_CASE_( self , lowercase = None , lowercase=None , lowercase = None , lowercase = "eval" , **lowercase , ) -> Dict[str, float]:
lowerCamelCase_ = gen_kwargs.copy()
lowerCamelCase_ = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
lowerCamelCase_ = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
lowerCamelCase_ = gen_kwargs
lowerCamelCase_ = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCamelCase_ = self.get_eval_dataloader(lowercase )
lowerCamelCase_ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = time.time()
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase_ = eval_loop(
lowercase , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowerCamelCase_ = self.post_process_function(lowercase , lowercase , lowercase )
lowerCamelCase_ = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase_ = metrics.pop(lowercase )
metrics.update(output.metrics )
else:
lowerCamelCase_ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCamelCase_ = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase )
return metrics
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=None , lowercase = "test" , **lowercase ) -> Union[str, Any]:
lowerCamelCase_ = gen_kwargs.copy()
lowerCamelCase_ = self.get_test_dataloader(lowercase )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = time.time()
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase_ = eval_loop(
lowercase , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCamelCase_ = self.post_process_function(lowercase , lowercase , lowercase , "predict" )
lowerCamelCase_ = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase_ = metrics.pop(lowercase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase )
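# Illustrative wiring sketch (the names on the right-hand side are assumptions): the subclass is
# meant to be built with `eval_examples` and a `post_process_function` that maps raw generations
# back onto the examples before metrics are computed.
#
#   trainer = _SCREAMING_SNAKE_CASE(
#       model=model, args=training_args, train_dataset=train_ds,
#       eval_dataset=eval_ds, eval_examples=raw_eval, post_process_function=post_processing_function,
#   )
#   metrics = trainer.evaluate(max_length=128, num_beams=4, metric_key_prefix="eval")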
| 19 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'Speech2TextFeatureExtractor'
lowerCAmelCase__ = 'Speech2TextTokenizer'
def __init__( self , lowercase , lowercase ) -> Optional[int]:
super().__init__(lowercase , lowercase )
lowerCamelCase_ = self.feature_extractor
lowerCamelCase_ = False
def __call__( self , *lowercase , **lowercase ) -> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowercase , **lowercase )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
lowerCamelCase_ = kwargs.pop("raw_speech" )
else:
lowerCamelCase_ = kwargs.pop("audio" , lowercase )
lowerCamelCase_ = kwargs.pop("sampling_rate" , lowercase )
lowerCamelCase_ = kwargs.pop("text" , lowercase )
if len(lowercase ) > 0:
lowerCamelCase_ = args[0]
lowerCamelCase_ = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
lowerCamelCase_ = self.feature_extractor(lowercase , *lowercase , sampling_rate=lowercase , **lowercase )
if text is not None:
lowerCamelCase_ = self.tokenizer(lowercase , **lowercase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowerCamelCase_ = encodings["input_ids"]
return inputs
def SCREAMING_SNAKE_CASE_( self , *lowercase , **lowercase ) -> Any:
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , *lowercase , **lowercase ) -> Dict:
return self.tokenizer.decode(*lowercase , **lowercase )
@contextmanager
def SCREAMING_SNAKE_CASE_( self ) -> Any:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer
yield
lowerCamelCase_ = self.feature_extractor
lowerCamelCase_ = False
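# Illustrative decode sketch (variable names assumed): generated token ids go back through the
# tokenizer via the thin wrappers above.
#
#   transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)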
| 19 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
__A ='''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
super().__init__()
self.register_modules(
prior=lowercase , image_encoder=lowercase , image_processor=lowercase , scheduler=lowercase , renderer=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if latents is None:
lowerCamelCase_ = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase_ = latents.to(lowercase )
lowerCamelCase_ = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE_( self , lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device(f'cuda:{gpu_id}' )
lowerCamelCase_ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
if isinstance(lowercase , lowercase ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase_ = torch.cat(lowercase , axis=0 ) if image[0].ndim == 4 else torch.stack(lowercase , axis=0 )
if not isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = self.image_processor(lowercase , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
lowerCamelCase_ = image.to(dtype=self.image_encoder.dtype , device=lowercase )
lowerCamelCase_ = self.image_encoder(lowercase )["last_hidden_state"]
lowerCamelCase_ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase_ = image_embeds.repeat_interleave(lowercase , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_ = torch.zeros_like(lowercase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowercase )
def __call__( self , lowercase , lowercase = 1 , lowercase = 25 , lowercase = None , lowercase = None , lowercase = 4.0 , lowercase = 64 , lowercase = "pil" , lowercase = True , ) -> Union[str, Any]:
if isinstance(lowercase , PIL.Image.Image ):
lowerCamelCase_ = 1
elif isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = image.shape[0]
elif isinstance(lowercase , lowercase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase_ = len(lowercase )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowercase )}' )
lowerCamelCase_ = self._execution_device
lowerCamelCase_ = batch_size * num_images_per_prompt
lowerCamelCase_ = guidance_scale > 1.0
lowerCamelCase_ = self._encode_image(lowercase , lowercase , lowercase , lowercase )
# prior
self.scheduler.set_timesteps(lowercase , device=lowercase )
lowerCamelCase_ = self.scheduler.timesteps
lowerCamelCase_ = self.prior.config.num_embeddings
lowerCamelCase_ = self.prior.config.embedding_dim
lowerCamelCase_ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowercase , lowercase , lowercase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase_ = latents.reshape(latents.shape[0] , lowercase , lowercase )
for i, t in enumerate(self.progress_bar(lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(lowercase , lowercase )
lowerCamelCase_ = self.prior(
lowercase , timestep=lowercase , proj_embedding=lowercase , ).predicted_image_embedding
# remove the variance
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase_ = self.scheduler.step(
lowercase , timestep=lowercase , sample=lowercase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowercase )
lowerCamelCase_ = []
for i, latent in enumerate(lowercase ):
lowerCamelCase_ = self.renderer.decode(
latent[None, :] , lowercase , size=lowercase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(lowercase )
lowerCamelCase_ = torch.stack(lowercase )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowerCamelCase_ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase_ = [self.numpy_to_pil(lowercase ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowercase )
| 19 | 1 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__A =get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
__A =json.load(f)
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> str:
return FSMTTokenizer.from_pretrained(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Union[str, Any]:
lowerCamelCase_ = FSMTForConditionalGeneration.from_pretrained(lowercase ).to(lowercase )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 2_6.0],
["ru-en", 2_2.0],
["en-de", 2_2.0],
["de-en", 2_9.0],
] )
@slow
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> str:
# note: this test is not testing the best performance since it only evals a small batch
# but it should be enough to detect a regression in the output quality
lowerCamelCase_ = f'facebook/wmt19-{pair}'
lowerCamelCase_ = self.get_tokenizer(lowercase )
lowerCamelCase_ = self.get_model(lowercase )
lowerCamelCase_ = bleu_data[pair]["src"]
lowerCamelCase_ = bleu_data[pair]["tgt"]
lowerCamelCase_ = tokenizer(lowercase , return_tensors="pt" , truncation=lowercase , padding="longest" ).to(lowercase )
lowerCamelCase_ = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowerCamelCase_ = tokenizer.batch_decode(
lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
lowerCamelCase_ = calculate_bleu(lowercase , lowercase )
print(lowercase )
self.assertGreaterEqual(scores["bleu"] , lowercase )
| 19 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase_ ( ):
lowerCamelCase_ , lowerCamelCase_ = 9, 1_4 # noqa: F841
lowerCamelCase_ = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
lowerCamelCase_ = defaultdict(lowerCamelCase__ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
lowerCamelCase_ = mst(lowerCamelCase__ )
lowerCamelCase_ = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
lowerCamelCase_ = tuple(answer[:2] )
lowerCamelCase_ = tuple(edge[::-1] )
assert edge in result or reverse in result
| 19 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
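# A hypothetical launch command (script name assumed); the flags are the ones parsed in main():
#
#   accelerate launch tracking_example.py --with_tracking --project_dir logs --mixed_precision fp16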
__A =1_6
__A =3_2
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 1_6 ):
lowerCamelCase_ = AutoTokenizer.from_pretrained("bert-base-cased" )
lowerCamelCase_ = load_dataset("glue" , "mrpc" )
def tokenize_function(lowerCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ = datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowerCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ = 1_6
elif accelerator.mixed_precision != "no":
lowerCamelCase_ = 8
else:
lowerCamelCase_ = None
return tokenizer.pad(
lowerCamelCase__ , padding="longest" , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors="pt" , )
# Instantiate dataloaders.
lowerCamelCase_ = DataLoader(
tokenized_datasets["train"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
lowerCamelCase_ = DataLoader(
tokenized_datasets["validation"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__A =mocked_dataloaders # noqa: F811
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCamelCase__ ) == "1":
lowerCamelCase_ = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowerCamelCase_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
lowerCamelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ = config["lr"]
lowerCamelCase_ = int(config["num_epochs"] )
lowerCamelCase_ = int(config["seed"] )
lowerCamelCase_ = int(config["batch_size"] )
set_seed(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
lowerCamelCase_ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase_ = batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase_ = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ = model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ = AdamW(params=model.parameters() , lr=lowerCamelCase__ )
# Instantiate scheduler
lowerCamelCase_ = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowerCamelCase_ = os.path.split(lowerCamelCase__ )[-1].split("." )[0]
accelerator.init_trackers(lowerCamelCase__ , lowerCamelCase__ )
# Now we train the model
for epoch in range(lowerCamelCase__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowerCamelCase_ = 0
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowerCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.logits.argmax(dim=-1 )
lowerCamelCase_ , lowerCamelCase_ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowerCamelCase__ , references=lowerCamelCase__ , )
lowerCamelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(lowerCamelCase__ ),
"epoch": epoch,
} , step=lowerCamelCase__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase_ ( ):
lowerCamelCase_ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=lowerCamelCase__ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = {"lr": 2e-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
main()
| 19 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__A =1_6
__A =3_2
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 1_6 ):
lowerCamelCase_ = AutoTokenizer.from_pretrained("bert-base-cased" )
lowerCamelCase_ = load_dataset("glue" , "mrpc" )
def tokenize_function(lowerCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ = datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowerCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ = 1_6
elif accelerator.mixed_precision != "no":
lowerCamelCase_ = 8
else:
lowerCamelCase_ = None
return tokenizer.pad(
lowerCamelCase__ , padding="longest" , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors="pt" , )
# Instantiate dataloaders.
lowerCamelCase_ = DataLoader(
tokenized_datasets["train"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
lowerCamelCase_ = DataLoader(
tokenized_datasets["validation"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__A =mocked_dataloaders # noqa: F811
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCamelCase__ ) == "1":
lowerCamelCase_ = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowerCamelCase_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
lowerCamelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ = config["lr"]
lowerCamelCase_ = int(config["num_epochs"] )
lowerCamelCase_ = int(config["seed"] )
lowerCamelCase_ = int(config["batch_size"] )
set_seed(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
lowerCamelCase_ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase_ = batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase_ = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ = model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ = AdamW(params=model.parameters() , lr=lowerCamelCase__ )
# Instantiate scheduler
lowerCamelCase_ = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowerCamelCase_ = os.path.split(lowerCamelCase__ )[-1].split("." )[0]
accelerator.init_trackers(lowerCamelCase__ , lowerCamelCase__ )
# Now we train the model
for epoch in range(lowerCamelCase__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowerCamelCase_ = 0
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowerCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.logits.argmax(dim=-1 )
lowerCamelCase_ , lowerCamelCase_ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowerCamelCase__ , references=lowerCamelCase__ , )
lowerCamelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(lowerCamelCase__ ),
"epoch": epoch,
} , step=lowerCamelCase__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase_ ( ):
lowerCamelCase_ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=lowerCamelCase__ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = {"lr": 2e-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
main()
| 19 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
__A ={
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
__A ={
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = (images / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCamelCase_ = numpy_to_pil(lowerCamelCase__ )
return images
def lowerCamelCase_ ( lowerCamelCase__ ):
if images.ndim == 3:
lowerCamelCase_ = images[None, ...]
lowerCamelCase_ = (images * 2_5_5).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
lowerCamelCase_ = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
lowerCamelCase_ = [Image.fromarray(lowerCamelCase__ ) for image in images]
return pil_images
| 19 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__A =None
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__A ={
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
__A ={
'''facebook/mbart-large-en-ro''': 1_0_2_4,
'''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
__A =['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = MBartTokenizer
lowerCAmelCase__ = []
lowerCAmelCase__ = []
def __init__( self , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=None , lowercase=None , lowercase=None , **lowercase , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
super().__init__(
vocab_file=lowercase , tokenizer_file=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , src_lang=lowercase , tgt_lang=lowercase , additional_special_tokens=lowercase , **lowercase , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = False if not self.vocab_file else True
lowerCamelCase_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
lowerCamelCase_ = {
lang_code: self.convert_tokens_to_ids(lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase_ = src_lang if src_lang is not None else "en_XX"
lowerCamelCase_ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE_( self ) -> str:
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , **lowercase ) -> List[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase_ = src_lang
lowerCamelCase_ = self(lowercase , add_special_tokens=lowercase , return_tensors=lowercase , **lowercase )
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = "en_XX" , lowercase = None , lowercase = "ro_RO" , **lowercase , ) -> BatchEncoding:
lowerCamelCase_ = src_lang
lowerCamelCase_ = tgt_lang
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase_ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,)
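# --- Added illustration (not part of the original file) ---------------------------
# A hedged usage sketch of the language-code handling above. The empty prefix and the
# "[eos, lang_code]" suffix match the MBart fast tokenizer, so this assumes an MBart
# checkpoint such as "facebook/mbart-large-en-ro"; substitute the checkpoint that
# actually belongs to this tokenizer class if it differs.
if __name__ == "__main__":
    from transformers import MBartTokenizerFast

    mbart_tokenizer = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    encoded = mbart_tokenizer("UN Chief says there is no military solution in Syria")
    # The last two ids are the EOS token followed by the source language code, which is
    # exactly what set_src_lang_special_tokens appends through the suffix_tokens list.
    print(mbart_tokenizer.convert_ids_to_tokens(encoded["input_ids"]))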
| 19 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 19 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__A =pytest.mark.integration
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        lowerCamelCase_ = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
lowerCamelCase_ = dset.map(
lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowercase , keep_in_memory=lowercase )
lowerCamelCase_ = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(lowercase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
from elasticsearch import Elasticsearch
lowerCamelCase_ = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
lowerCamelCase_ = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=lowercase )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertRaises(lowercase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase_ = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
self.assertRaises(lowercase , index.search_batch , queries[0] )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
import faiss
lowerCamelCase_ = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase_ = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowercase ):
lowerCamelCase_ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = faiss.IndexFlat(5 )
lowerCamelCase_ = FaissIndex(custom_index=lowercase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase_ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase_ ( lowerCamelCase__ ):
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase_ = "index.faiss"
lowerCamelCase_ = F'mock://{index_name}'
index.save(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = FaissIndex.load(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowerCamelCase__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = Elasticsearch()
lowerCamelCase_ = {"acknowledged": True}
lowerCamelCase_ = ElasticSearchIndex(es_client=lowercase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
# batched queries with timeout
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase , request_timeout=30 )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
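# --- Added illustration (not part of the original tests) --------------------------
# A hedged sketch of what the Dataset.add_faiss_index tests above exercise, written
# directly against faiss; it only needs `pip install faiss-cpu` and numpy.
def _faiss_inner_product_sketch():
    import faiss

    vectors = np.eye(5, dtype=np.float32)  # five toy vectors of dimension 5
    ip_index = faiss.IndexFlatIP(5)        # inner-product index, like METRIC_INNER_PRODUCT above
    ip_index.add(vectors)
    scores, ids = ip_index.search(np.ones((1, 5), dtype=np.float32), 3)
    return scores[0], ids[0]               # the three rows with the largest inner product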
| 19 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs =importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
COMPRESSION_FILESYSTEMS =[
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path):
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://" )[1]
    return dataset_path
def is_remote_filesystem(fs):
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs, src, dst):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock():
    if hasattr(fsspec.asyn , "reset_lock" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
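# --- Added illustration (not part of the original module) -------------------------
# A hedged usage sketch of the helpers above: strip the protocol from a dataset URI
# and check whether a filesystem is remote before deciding how a directory gets moved.
if __name__ == "__main__":
    local_fs = fsspec.filesystem("file")
    print(extract_path_from_uri("s3://my-bucket/my-dataset"))  # -> "my-bucket/my-dataset"
    print(is_remote_filesystem(local_fs))                      # -> False for the local filesystem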
| 19 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> List[str]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
lowerCamelCase_ = self.vocab_size - 1
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , head_mask=lowercase )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = OpenAIGPTLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTDoubleHeadsModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = OpenAIGPTForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = config_and_inputs
lowerCamelCase_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCAmelCase__ = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=False ) -> Any:
lowerCamelCase_ = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase , )
lowerCamelCase_ = inputs_dict["labels"]
lowerCamelCase_ = inputs_dict["labels"]
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase , )
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = OpenAIGPTModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , n_embd=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Any:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = OpenAIGPTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(lowercase )
lowerCamelCase_ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase ) # the president is
lowerCamelCase_ = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase_ = model.generate(lowercase , do_sample=lowercase )
self.assertListEqual(output_ids[0].tolist() , lowercase )
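# --- Added illustration (not part of the original tests) --------------------------
# A hedged, minimal version of the slow integration test above: greedy generation
# from the public "openai-gpt" checkpoint, runnable on CPU.
if __name__ == "__main__" and is_torch_available():
    from transformers import OpenAIGPTTokenizer

    demo_tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
    demo_model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
    demo_input_ids = demo_tokenizer("the president is", return_tensors="pt").input_ids
    demo_output_ids = demo_model.generate(demo_input_ids, do_sample=False, max_length=20)
    print(demo_tokenizer.decode(demo_output_ids[0]))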
| 19 | 1 |
import qiskit
def single_qubit_measure(qubits, classical_bits):
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_0_0_0 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
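# --- Added illustration (not part of the original file) ---------------------------
# Hedged follow-up sketch: the circuit above measures a qubit left in |0>, so every
# shot returns "0". Putting a Hadamard gate in front gives a roughly 50/50 split of
# "0"/"1" counts, which is a quick sanity check of the simulator wiring.
def superposition_measure(shots=1_0_0_0):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(1, 1)
    circuit.h(0)  # put the qubit into an equal superposition
    circuit.measure([0], [0])
    job = qiskit.execute(circuit, simulator, shots=shots)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    print(F"""Counts with a Hadamard first: {superposition_measure()}""")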
| 19 |
DIGITS_FIFTH_POWER ={str(digit): digit**5 for digit in range(1_0)}
def digits_fifth_powers_sum(number):
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number) )
def solution():
    return sum(
        number
        for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
        if number == digits_fifth_powers_sum(number) )
if __name__ == "__main__":
print(solution())
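# --- Added aside (not part of the original file) -----------------------------------
# Why 1_000_000 is a safe upper bound: a d-digit number is at least 10**(d - 1), while
# its digit fifth-power sum is at most d * 9**5; from d = 7 onwards the lower bound
# already exceeds the largest possible sum, so no solution has seven or more digits.
if __name__ == "__main__":
    for d in range(1, 9):
        print(d, 10 ** (d - 1) <= d * 9**5)  # True only while a d-digit solution is still possible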
| 19 | 1 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , ):
if attention_mask is None:
lowerCamelCase_ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCamelCase_ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCamelCase_ = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=lowerCamelCase__ )
if decoder_head_mask is None:
lowerCamelCase_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowerCamelCase__ )
if cross_attn_head_mask is None:
lowerCamelCase_ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=lowerCamelCase__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=16 , lowercase=2 , lowercase=4 , lowercase=4 , lowercase="relu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> int:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = encoder_layerdrop
lowerCamelCase_ = decoder_layerdrop
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = eos_token_id
lowerCamelCase_ = pad_token_id
lowerCamelCase_ = bos_token_id
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = self.eos_token_id # Eos Token
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCamelCase_ = input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase_ = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCamelCase_ = self.get_config()
lowerCamelCase_ = prepare_mam_aaa_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def SCREAMING_SNAKE_CASE_( self ) -> Any:
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ , lowerCamelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> Any:
lowerCamelCase_ = MaMaaaModel(config=lowercase ).get_decoder().to(lowercase ).eval()
lowerCamelCase_ = inputs_dict["input_ids"]
lowerCamelCase_ = inputs_dict["attention_mask"]
lowerCamelCase_ = inputs_dict["head_mask"]
# first forward pass
lowerCamelCase_ = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase )
lowerCamelCase_ , lowerCamelCase_ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )["last_hidden_state"]
lowerCamelCase_ = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[
"last_hidden_state"
]
# select random slice
lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-2 ) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> Union[str, Any]:
lowerCamelCase_ = MaMaaaModel(config=lowercase ).to(lowercase ).eval()
lowerCamelCase_ = model(**lowercase )
lowerCamelCase_ = outputs.encoder_last_hidden_state
lowerCamelCase_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ = model.get_encoder()
encoder.save_pretrained(lowercase )
lowerCamelCase_ = MaMaaaEncoder.from_pretrained(lowercase ).to(lowercase )
lowerCamelCase_ = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ = model.get_decoder()
decoder.save_pretrained(lowercase )
lowerCamelCase_ = MaMaaaDecoder.from_pretrained(lowercase ).to(lowercase )
lowerCamelCase_ = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=lowercase , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
lowerCAmelCase__ = (
{
'conversational': MaMaaaForConditionalGeneration,
'feature-extraction': MaMaaaModel,
'summarization': MaMaaaForConditionalGeneration,
'text2text-generation': MaMaaaForConditionalGeneration,
'translation': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = MaMaaaModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase )
lowerCamelCase_ , lowerCamelCase_ = model_class.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertEqual(info["missing_keys"] , [] )
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
lowerCamelCase_ = model_class(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = copy.deepcopy(self._prepare_for_class(lowercase , lowercase ) )
if not self.is_encoder_decoder:
lowerCamelCase_ = inputs["input_ids"]
del inputs["input_ids"]
else:
lowerCamelCase_ = inputs["input_ids"]
lowerCamelCase_ = inputs.get("decoder_input_ids" , lowercase )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , lowercase )
lowerCamelCase_ = model.get_input_embeddings()
if not self.is_encoder_decoder:
lowerCamelCase_ = wte(lowercase )
else:
lowerCamelCase_ = wte(lowercase )
lowerCamelCase_ = wte(lowercase )
with torch.no_grad():
model(**lowercase )[0]
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
lowerCamelCase_ = input_dict["input_ids"]
lowerCamelCase_ = input_ids.ne(1 ).to(lowercase )
lowerCamelCase_ = MaMaaaForConditionalGeneration(lowercase ).eval().to(lowercase )
if torch_device == "cuda":
model.half()
model.generate(lowercase , attention_mask=lowercase )
model.generate(num_beams=4 , do_sample=lowercase , early_stopping=lowercase , num_return_sequences=3 )
def lowerCamelCase_ ( lowerCamelCase__ ):
return torch.tensor(lowerCamelCase__ , dtype=torch.long , device=lowerCamelCase__ )
__A =1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(lowercase )
lowerCamelCase_ = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
lowerCamelCase_ = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
lowerCamelCase_ = prepare_mam_aaa_inputs_dict(model.config , lowercase , lowercase )
with torch.no_grad():
lowerCamelCase_ = model(**lowercase )[0]
lowerCamelCase_ = torch.Size((1, 11, 1024) )
self.assertEqual(output.shape , lowercase )
# change to expected output here
lowerCamelCase_ = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=lowercase )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=lowercase ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(lowercase )
# change to intended input
lowerCamelCase_ = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
lowerCamelCase_ = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
lowerCamelCase_ = prepare_mam_aaa_inputs_dict(model.config , lowercase , lowercase )
with torch.no_grad():
lowerCamelCase_ = model(**lowercase )[0]
lowerCamelCase_ = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , lowercase )
# change to expected output here
lowerCamelCase_ = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=lowercase )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=lowercase ) )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(lowercase )
lowerCamelCase_ = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
lowerCamelCase_ = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
lowerCamelCase_ = tokenizer(lowercase , padding=lowercase , return_tensors="pt" )
lowerCamelCase_ = model.generate(
input_ids=dct["input_ids"].to(lowercase ) , attention_mask=dct["attention_mask"].to(lowercase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
lowerCamelCase_ = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
lowerCamelCase_ = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=lowercase , skip_special_tokens=lowercase )
assert generated == expected_en
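# --- Added illustration (not part of the original tests) --------------------------
# A hedged, minimal version of the seq2seq integration test above: fr -> en translation
# with the public facebook/m2m100_418M checkpoint, using the class names this file
# imports (upstream these are the M2M100 tokenizer and conditional-generation model).
if __name__ == "__main__" and is_torch_available():
    demo_tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
    demo_model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    demo_batch = demo_tokenizer("La vie est comme une boîte de chocolat.", return_tensors="pt")
    demo_generated = demo_model.generate(
        **demo_batch, forced_bos_token_id=demo_tokenizer.get_lang_id("en"), num_beams=5
    )
    print(demo_tokenizer.batch_decode(demo_generated, skip_special_tokens=True))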
| 19 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase_ = 1_9_2
lowerCamelCase_ = 7_6_8
lowerCamelCase_ = 1_2
lowerCamelCase_ = 3
lowerCamelCase_ = [8_0_0, 1_3_3_3]
lowerCamelCase_ = False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = 3_3_0
lowerCamelCase_ = 1_4
lowerCamelCase_ = 6
lowerCamelCase_ = 1_3_2_0
elif "yolos_s" in yolos_name:
lowerCamelCase_ = 3_8_4
lowerCamelCase_ = 1_5_3_6
lowerCamelCase_ = 1_2
lowerCamelCase_ = 6
elif "yolos_b" in yolos_name:
lowerCamelCase_ = [8_0_0, 1_3_4_4]
lowerCamelCase_ = 9_1
lowerCamelCase_ = "huggingface/label-files"
lowerCamelCase_ = "coco-detection-id2label.json"
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowerCamelCase_ = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase_ = in_proj_bias[: config.hidden_size]
lowerCamelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase_ = in_proj_weight[-config.hidden_size :, :]
lowerCamelCase_ = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_ ( lowerCamelCase__ ):
if "backbone" in name:
lowerCamelCase_ = name.replace("backbone" , "vit" )
if "cls_token" in name:
lowerCamelCase_ = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowerCamelCase_ = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowerCamelCase_ = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowerCamelCase_ = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowerCamelCase_ = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCamelCase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase_ = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowerCamelCase_ = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowerCamelCase_ = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowerCamelCase_ = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
for key in orig_state_dict.copy().keys():
lowerCamelCase_ = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowerCamelCase_ = key.split("." )
lowerCamelCase_ = int(key_split[2] )
lowerCamelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase_ = val[:dim, :]
lowerCamelCase_ = val[
dim : dim * 2, :
]
lowerCamelCase_ = val[-dim:, :]
else:
lowerCamelCase_ = val[:dim]
lowerCamelCase_ = val[dim : dim * 2]
lowerCamelCase_ = val[-dim:]
else:
lowerCamelCase_ = val
return orig_state_dict
def lowerCamelCase_ ( ):
lowerCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase_ = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
lowerCamelCase_ = get_yolos_config(lowerCamelCase__ )
# load original state_dict
lowerCamelCase_ = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]
# load 🤗 model
lowerCamelCase_ = YolosForObjectDetection(lowerCamelCase__ )
model.eval()
lowerCamelCase_ = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase_ = 8_0_0 if yolos_name != "yolos_ti" else 5_1_2
lowerCamelCase_ = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ )
lowerCamelCase_ = image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = outputs.logits, outputs.pred_boxes
lowerCamelCase_ , lowerCamelCase_ = None, None
if yolos_name == "yolos_ti":
lowerCamelCase_ = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
lowerCamelCase_ = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase_ = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
lowerCamelCase_ = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase_ = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
lowerCamelCase_ = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
lowerCamelCase_ = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
lowerCamelCase_ = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
lowerCamelCase_ = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
lowerCamelCase_ = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCamelCase_ = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" )
model.push_to_hub(lowerCamelCase__ , organization="hustvl" )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__A =parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
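# --- Added illustration (not part of the original script) -------------------------
# A hedged sketch of running a converted checkpoint. "hustvl/yolos-small" is an assumed
# hub id; point from_pretrained at the --pytorch_dump_folder_path used above if the
# model was only converted locally.
def run_converted_model(model_name_or_path="hustvl/yolos-small"):
    image_processor = YolosImageProcessor.from_pretrained(model_name_or_path)
    model = YolosForObjectDetection.from_pretrained(model_name_or_path)
    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    results = image_processor.post_process_object_detection(
        outputs, threshold=0.9, target_sizes=torch.tensor([image.size[::-1]])
    )[0]
    for score, label in zip(results["scores"], results["labels"]):
        print(model.config.id2label[label.item()], round(score.item(), 3))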
| 19 | 1 |
import random
def random_graph(vertices_number, probability, directed = False ):
    graph = {i: [] for i in range(vertices_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, add an edge in from j to i, either
                    graph[j].append(i )
    return graph
def complete_graph(vertices_number):
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
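# --- Added illustration (not part of the original file) ---------------------------
# A hedged usage sketch: seeding the RNG gives a reproducible Erdos-Renyi style draw.
if __name__ == "__main__":
    random.seed(1)
    print(random_graph(4, 0.5))  # adjacency lists for 4 vertices, edge probability 0.5
    print(complete_graph(3))     # {0: [1, 2], 1: [0, 2], 2: [0, 1]}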
| 19 |
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=1_0, r=5))
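# --- Added check (not part of the original file) -----------------------------------
# The single rolling-row computation above agrees with the closed form in the standard
# library (math.comb, Python 3.8+); 10 choose 5 is 252.
import math
assert binomial_coefficient(1_0, 5) == math.comb(1_0, 5) == 252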
| 19 | 1 |
from __future__ import annotations
import math
def ucal(u, p):
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def main():
    n = int(input("enter the numbers of values: " ) )
    y = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print("enter the values of parameters in a list: " )
    x = list(map(int , input().split() ) )
    print("enter the values of corresponding parameters: " )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input("enter the value to interpolate: " ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
main()
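# --- Added illustration (not part of the original file) ---------------------------
# A hedged, non-interactive variant of main(): the same forward-difference table and
# ucal() terms, but with hard-coded samples so it runs without stdin. For f(x) = x**2
# sampled at x = 0, 1, 2, 3, interpolating at 1.5 gives 2.25.
def interpolate(x, y_values, value):
    n = len(x)
    y = [[0.0] * n for _ in range(n)]
    for i, y_i in enumerate(y_values):
        y[i][0] = y_i
    u = (value - x[0]) / (x[1] - x[0])
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    return summ
if __name__ == "__main__":
    print(interpolate([0, 1, 2, 3], [0.0, 1.0, 4.0, 9.0], 1.5))  # 2.25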
| 19 |
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0: # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1 # any number raised to 0 is 1
    raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt ='''Enter the base and the power separated by a comma: '''
    xa, ya =map(int, input(prompt).split(''','''))
    xb, yb =map(int, input(prompt).split(''','''))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa =res(xa, ya)
    resb =res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print('''Largest number is''', xa, '''^''', ya)
    elif resb > resa:
        print('''Largest number is''', xb, '''^''', yb)
    else:
        print('''Both are equal''')
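# --- Added check (not part of the original file) -----------------------------------
# Because log10(x**y) == y * log10(x), comparing the returned logs ranks the powers
# without materialising the (possibly huge) integers themselves.
assert res(2, 10) < res(3, 7)           # 2**10 == 1024 < 3**7 == 2187
assert abs(res(10, 3) - 3.0) < 1e-12    # log10(10**3) == 3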
| 19 | 1 |
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict( TypedDict ):
    bwt_string: str
    idx_original_string: int
def all_rotations(s):
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    return [s[i:] + s[:i] for i in range(len(s ) )]
def bwt_transform(s):
    if not isinstance(s , str ):
        raise TypeError("The parameter s type must be str." )
    if not s:
        raise ValueError("The parameter s must not be empty." )
    rotations = all_rotations(s )
    rotations.sort() # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s ),
    }
    return response
def reverse_bwt(bwt_string, idx_original_string):
    if not isinstance(bwt_string , str ):
        raise TypeError("The parameter bwt_string type must be str." )
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty." )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int." )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0." )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)." )
    ordered_rotations = [""] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg ='''Provide a string that I will generate its BWT transform: '''
    s =input(entry_msg).strip()
    result =bwt_transform(s)
    print(
        F"""Burrows Wheeler transform for string '{s}' results """
        F"""in '{result["bwt_string"]}'"""
    )
    original_string =reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
    print(
        F"""Reversing Burrows Wheeler transform for entry '{result["bwt_string"]}' """
        F"""we get original string '{original_string}'"""
    )
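# --- Added check (not part of the original file) -----------------------------------
# A hedged round-trip on a fixed string instead of interactive input; "^BANANA" is the
# classic textbook example and transforms to "BNN^AAA".
_result = bwt_transform("^BANANA")
assert _result["bwt_string"] == "BNN^AAA"
assert reverse_bwt(_result["bwt_string"], _result["idx_original_string"]) == "^BANANA"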
| 19 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
__A =logging.get_logger(__name__)
__A =list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__A =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Model type selected in the list: ' + ', '.join(snake_case_ )} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase__ = field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase__ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase__ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase__ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'train'
lowerCAmelCase__ = 'dev'
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , lowercase , lowercase , lowercase = None , lowercase = Split.train , lowercase = False , lowercase = None , lowercase = "pt" , ) -> List[str]:
lowerCamelCase_ = args
lowerCamelCase_ = is_language_sensitive
lowerCamelCase_ = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowercase , lowercase ):
try:
lowerCamelCase_ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowerCamelCase_ = mode
# Load data features from cache or dataset file
lowerCamelCase_ = "v2" if args.version_2_with_negative else "v1"
lowerCamelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ = cached_features_file + ".lock"
with FileLock(lowercase ):
if os.path.exists(lowercase ) and not args.overwrite_cache:
lowerCamelCase_ = time.time()
lowerCamelCase_ = torch.load(lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ = self.old_features["features"]
lowerCamelCase_ = self.old_features.get("dataset" , lowercase )
lowerCamelCase_ = self.old_features.get("examples" , lowercase )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
lowerCamelCase_ = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ = self.processor.get_train_examples(args.data_dir )
lowerCamelCase_ , lowerCamelCase_ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=lowercase , )
lowerCamelCase_ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self , lowercase ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
lowerCamelCase_ = self.features[lowercase]
lowerCamelCase_ = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase_ = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ = torch.tensor(feature.start_position , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 19 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A =logging.get_logger(__name__)
__A ={
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'deformable_detr'
lowerCAmelCase__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , lowercase=True , lowercase=None , lowercase=3 , lowercase=300 , lowercase=1024 , lowercase=6 , lowercase=1024 , lowercase=8 , lowercase=6 , lowercase=1024 , lowercase=8 , lowercase=0.0 , lowercase=True , lowercase="relu" , lowercase=256 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.0_2 , lowercase=1.0 , lowercase=True , lowercase=False , lowercase="sine" , lowercase="resnet50" , lowercase=True , lowercase=False , lowercase=4 , lowercase=4 , lowercase=4 , lowercase=False , lowercase=300 , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=1 , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=0.1 , lowercase=0.2_5 , lowercase=False , **lowercase , ) -> List[str]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowerCamelCase_ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowercase , lowercase ):
lowerCamelCase_ = backbone_config.get("model_type" )
lowerCamelCase_ = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase_ = config_class.from_dict(lowercase )
lowerCamelCase_ = use_timm_backbone
lowerCamelCase_ = backbone_config
lowerCamelCase_ = num_channels
lowerCamelCase_ = num_queries
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = d_model
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = encoder_attention_heads
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = decoder_layers
lowerCamelCase_ = decoder_attention_heads
lowerCamelCase_ = dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = activation_function
lowerCamelCase_ = init_std
lowerCamelCase_ = init_xavier_std
lowerCamelCase_ = encoder_layerdrop
lowerCamelCase_ = auxiliary_loss
lowerCamelCase_ = position_embedding_type
lowerCamelCase_ = backbone
lowerCamelCase_ = use_pretrained_backbone
lowerCamelCase_ = dilation
# deformable attributes
lowerCamelCase_ = num_feature_levels
lowerCamelCase_ = encoder_n_points
lowerCamelCase_ = decoder_n_points
lowerCamelCase_ = two_stage
lowerCamelCase_ = two_stage_num_proposals
lowerCamelCase_ = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowerCamelCase_ = class_cost
lowerCamelCase_ = bbox_cost
lowerCamelCase_ = giou_cost
# Loss coefficients
lowerCamelCase_ = mask_loss_coefficient
lowerCamelCase_ = dice_loss_coefficient
lowerCamelCase_ = bbox_loss_coefficient
lowerCamelCase_ = giou_loss_coefficient
lowerCamelCase_ = eos_coefficient
lowerCamelCase_ = focal_alpha
lowerCamelCase_ = disable_custom_kernels
super().__init__(is_encoder_decoder=lowercase , **lowercase )
@property
def SCREAMING_SNAKE_CASE_( self ) -> int:
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE_( self ) -> int:
return self.d_model
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCamelCase_ = self.backbone_config.to_dict()
lowerCamelCase_ = self.__class__.model_type
return output
| 19 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
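# Abstract base class for a CLI subcommand: the static abstract method is expected to register the
# subcommand's arguments on an ArgumentParser, and the instance method to execute the command.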
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE_( lowercase ) -> int:
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
raise NotImplementedError()
| 19 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> List[str]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
lowerCamelCase_ = self.vocab_size - 1
def SCREAMING_SNAKE_CASE_( self ) -> Any:
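# Build random input ids, optional token type ids/labels, a head mask, and a tiny OpenAIGPTConfig
# used by the model tests below.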
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , head_mask=lowercase )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = OpenAIGPTLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTDoubleHeadsModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = OpenAIGPTForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = config_and_inputs
lowerCamelCase_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCAmelCase__ = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=False ) -> Any:
lowerCamelCase_ = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase , )
lowerCamelCase_ = inputs_dict["labels"]
lowerCamelCase_ = inputs_dict["labels"]
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase , )
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = OpenAIGPTModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , n_embd=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Any:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = OpenAIGPTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(lowercase )
lowerCamelCase_ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase ) # the president is
lowerCamelCase_ = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase_ = model.generate(lowercase , do_sample=lowercase )
self.assertListEqual(output_ids[0].tolist() , lowercase )
| 19 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[Any]:
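# Patch outdated scheduler configs (steps_offset, skip_prk_steps), warn when the safety checker is
# disabled, and register all sub-models of the pipeline.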
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = 1
lowerCamelCase_ = FrozenDict(lowercase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = True
lowerCamelCase_ = FrozenDict(lowercase )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=lowercase , segmentation_processor=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
self.enable_attention_slicing(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , lowercase , lowercase , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> int:
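# Segment the region described by `text` with the CLIPSeg model, turn the sigmoid logits into a
# mask image, then run a standard Stable Diffusion inpainting pipeline on that mask.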
lowerCamelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowerCamelCase_ = self.segmentation_model(**lowercase )
lowerCamelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowerCamelCase_ = self.numpy_to_pil(lowercase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowerCamelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowercase , image=lowercase , mask_image=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , )
| 19 | 1 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__A =datasets.utils.logging.get_logger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ):
lowerCAmelCase__ = None
lowerCAmelCase__ = "utf-8"
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = True # deprecated
lowerCAmelCase__ = None # deprecated
lowerCAmelCase__ = 10 << 20 # 10MB
lowerCAmelCase__ = None
class _SCREAMING_SNAKE_CASE ( datasets.ArrowBasedBuilder ):
lowerCAmelCase__ = JsonConfig
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
lowerCamelCase_ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." )
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]:
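# Resolve config.data_files (after download/extraction) into one SplitGenerator per split, each
# wrapping an iterator over its files.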
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
lowerCamelCase_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowercase , (str, list, tuple) ):
lowerCamelCase_ = data_files
if isinstance(lowercase , lowercase ):
lowerCamelCase_ = [files]
lowerCamelCase_ = [dl_manager.iter_files(lowercase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
lowerCamelCase_ = []
for split_name, files in data_files.items():
if isinstance(lowercase , lowercase ):
lowerCamelCase_ = [files]
lowerCamelCase_ = [dl_manager.iter_files(lowercase ) for file in files]
splits.append(datasets.SplitGenerator(name=lowercase , gen_kwargs={"files": files} ) )
return splits
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
lowerCamelCase_ = self.config.features.arrow_schema.field(lowercase ).type
lowerCamelCase_ = pa_table.append_column(lowercase , pa.array([None] * len(lowercase ) , type=lowercase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCamelCase_ = table_cast(lowercase , self.config.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> int:
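# Yield Arrow tables either from a single JSON document (when config.field is set) or from
# JSON-lines read in chunks, doubling the PyArrow block size when a record straddles a chunk.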
for file_idx, file in enumerate(itertools.chain.from_iterable(lowercase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(lowercase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowerCamelCase_ = json.load(lowercase )
# We keep only the field we are interested in
lowerCamelCase_ = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(lowercase , (list, tuple) ):
lowerCamelCase_ = set().union(*[row.keys() for row in dataset] )
lowerCamelCase_ = {col: [row.get(lowercase ) for row in dataset] for col in keys}
else:
lowerCamelCase_ = dataset
lowerCamelCase_ = pa.Table.from_pydict(lowercase )
yield file_idx, self._cast_table(lowercase )
# If the file has one json object per line
else:
with open(lowercase , "rb" ) as f:
lowerCamelCase_ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
lowerCamelCase_ = max(self.config.chunksize // 32 , 16 << 10 )
lowerCamelCase_ = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
lowerCamelCase_ = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(lowercase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
lowerCamelCase_ = batch.decode(self.config.encoding , errors=lowercase ).encode("utf-8" )
try:
while True:
try:
lowerCamelCase_ = paj.read_json(
io.BytesIO(lowercase ) , read_options=paj.ReadOptions(block_size=lowercase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(lowercase , pa.ArrowInvalid )
and "straddling" not in str(lowercase )
or block_size > len(lowercase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'Batch of {len(lowercase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
lowercase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowerCamelCase_ = json.load(lowercase )
except json.JSONDecodeError:
logger.error(f'Failed to read file \'{file}\' with error {type(lowercase )}: {e}' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(lowercase , lowercase ): # list is the only sequence type supported in JSON
try:
lowerCamelCase_ = set().union(*[row.keys() for row in dataset] )
lowerCamelCase_ = {col: [row.get(lowercase ) for row in dataset] for col in keys}
lowerCamelCase_ = pa.Table.from_pydict(lowercase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'Failed to read file \'{file}\' with error {type(lowercase )}: {e}' )
raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
yield file_idx, self._cast_table(lowercase )
break
else:
logger.error(f'Failed to read file \'{file}\' with error {type(lowercase )}: {e}' )
raise ValueError(
f'Not able to read records in the JSON file at {file}. '
f'You should probably indicate the field of the JSON file containing your records. '
f'This JSON file contains the following fields: {str(list(dataset.keys() ) )}. '
f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowercase )
batch_idx += 1
| 19 |
from collections import deque
def lowerCamelCase_ ( lowerCamelCase__ ):
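# Tarjan's strongly connected components: a DFS assigns each node a discovery index and a
# low-link value; when a node's low-link equals its own index, everything above it on the
# stack (including the node itself) forms one strongly connected component.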
lowerCamelCase_ = len(lowerCamelCase__ )
lowerCamelCase_ = deque()
lowerCamelCase_ = [False for _ in range(lowerCamelCase__ )]
lowerCamelCase_ = [-1 for _ in range(lowerCamelCase__ )]
lowerCamelCase_ = index_of[:]
def strong_connect(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = index # the number when this node is seen
lowerCamelCase_ = index # lowest rank node reachable from here
index += 1
stack.append(lowerCamelCase__ )
lowerCamelCase_ = True
for w in g[v]:
if index_of[w] == -1:
lowerCamelCase_ = strong_connect(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
lowerCamelCase_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
lowerCamelCase_ = []
lowerCamelCase_ = stack.pop()
lowerCamelCase_ = False
component.append(lowerCamelCase__ )
while w != v:
lowerCamelCase_ = stack.pop()
lowerCamelCase_ = False
component.append(lowerCamelCase__ )
components.append(lowerCamelCase__ )
return index
lowerCamelCase_ = []
for v in range(lowerCamelCase__ ):
if index_of[v] == -1:
strong_connect(lowerCamelCase__ , 0 , lowerCamelCase__ )
return components
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
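# Build a directed adjacency list over the given number of vertices from (u, v) edge pairs.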
lowerCamelCase_ = [[] for _ in range(lowerCamelCase__ )]
for u, v in edges:
g[u].append(lowerCamelCase__ )
return g
if __name__ == "__main__":
# Test
__A =7
__A =[0, 0, 1, 2, 3, 3, 4, 4, 6]
__A =[1, 3, 2, 0, 1, 4, 5, 6, 5]
__A =[(u, v) for u, v in zip(source, target)]
__A =create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 19 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def lowerCamelCase_ ( ):
lowerCamelCase_ = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=lowerCamelCase__ )
lowerCamelCase_ = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=lowerCamelCase__ )
env_command_parser(subparsers=lowerCamelCase__ )
launch_command_parser(subparsers=lowerCamelCase__ )
tpu_command_parser(subparsers=lowerCamelCase__ )
test_command_parser(subparsers=lowerCamelCase__ )
# Let's go
lowerCamelCase_ = parser.parse_args()
if not hasattr(lowerCamelCase__ , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(lowerCamelCase__ )
if __name__ == "__main__":
main()
| 19 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A ={'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 19 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A ={
'''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A ={'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A =logging.get_logger(__name__)
__A ='''▁'''
__A ={'''vocab_file''': '''sentencepiece.bpe.model'''}
__A ={
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
__A ={
'''facebook/xglm-564M''': 2_0_4_8,
}
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['input_ids', 'attention_mask']
def __init__( self , lowercase , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase = None , **lowercase , ) -> None:
lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowerCamelCase_ = 7
lowerCamelCase_ = [f'<madeupword{i}>' for i in range(self.num_madeup_words )]
lowerCamelCase_ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase ) )
lowerCamelCase_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCamelCase_ = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCamelCase_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
lowerCamelCase_ = len(self.sp_model )
lowerCamelCase_ = {f'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(lowercase )
lowerCamelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Optional[int]:
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
lowerCamelCase_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowercase ) -> str:
lowerCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ = {}
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowerCamelCase_ = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
if token_ids_a is None:
return [1] + ([0] * len(lowercase ))
return [1] + ([0] * len(lowercase )) + [1, 1] + ([0] * len(lowercase ))
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]:
return self.sp_model.encode(lowercase , out_type=lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase_ = self.sp_model.PieceToId(lowercase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]:
lowerCamelCase_ = "".join(lowercase ).replace(lowercase , " " ).strip()
return out_string
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase_ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , "wb" ) as fi:
lowerCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
| 19 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__A ='''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__A =concatenate_datasets
__A =DownloadConfig
__A =DownloadManager
__A =DownloadMode
__A =DownloadConfig
__A =DownloadMode
__A =DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 19 | 1 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
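# Convert a PIL image (or a list of images / tensors) into a single torch tensor resized to
# (w, h), scaled to [-1, 1], with shape (batch, channels, height, width).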
if isinstance(lowerCamelCase__ , torch.Tensor ):
return image
elif isinstance(lowerCamelCase__ , PIL.Image.Image ):
lowerCamelCase_ = [image]
if isinstance(image[0] , PIL.Image.Image ):
lowerCamelCase_ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
lowerCamelCase_ = np.concatenate(lowerCamelCase__ , axis=0 )
lowerCamelCase_ = np.array(lowerCamelCase__ ).astype(np.floataa ) / 2_55.0
lowerCamelCase_ = image.transpose(0 , 3 , 1 , 2 )
lowerCamelCase_ = 2.0 * image - 1.0
lowerCamelCase_ = torch.from_numpy(lowerCamelCase__ )
elif isinstance(image[0] , torch.Tensor ):
lowerCamelCase_ = torch.cat(lowerCamelCase__ , dim=0 )
return image
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=0.99_95 ):
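# Spherical linear interpolation between two vectors (numpy or torch); falls back to plain
# linear interpolation when the vectors are nearly colinear (dot product above the threshold).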
if not isinstance(lowerCamelCase__ , np.ndarray ):
lowerCamelCase_ = True
lowerCamelCase_ = va.device
lowerCamelCase_ = va.cpu().numpy()
lowerCamelCase_ = va.cpu().numpy()
lowerCamelCase_ = np.sum(va * va / (np.linalg.norm(lowerCamelCase__ ) * np.linalg.norm(lowerCamelCase__ )) )
if np.abs(lowerCamelCase__ ) > DOT_THRESHOLD:
lowerCamelCase_ = (1 - t) * va + t * va
else:
lowerCamelCase_ = np.arccos(lowerCamelCase__ )
lowerCamelCase_ = np.sin(lowerCamelCase__ )
lowerCamelCase_ = theta_a * t
lowerCamelCase_ = np.sin(lowerCamelCase__ )
lowerCamelCase_ = np.sin(theta_a - theta_t ) / sin_theta_a
lowerCamelCase_ = sin_theta_t / sin_theta_a
lowerCamelCase_ = sa * va + sa * va
if inputs_are_torch:
lowerCamelCase_ = torch.from_numpy(lowerCamelCase__ ).to(lowerCamelCase__ )
return va
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
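# Squared spherical distance between L2-normalized embeddings, used as the CLIP guidance loss.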
lowerCamelCase_ = F.normalize(lowerCamelCase__ , dim=-1 )
lowerCamelCase_ = F.normalize(lowerCamelCase__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
for param in model.parameters():
lowerCamelCase_ = value
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
vae=lowercase , text_encoder=lowercase , clip_model=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , feature_extractor=lowercase , coca_model=lowercase , coca_tokenizer=lowercase , coca_transform=lowercase , )
lowerCamelCase_ = (
feature_extractor.size
if isinstance(feature_extractor.size , lowercase )
else feature_extractor.size["shortest_edge"]
)
lowerCamelCase_ = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , lowercase )
set_requires_grad(self.clip_model , lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase = "auto" ) -> Union[str, Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
self.enable_attention_slicing(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> int:
set_requires_grad(self.vae , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
set_requires_grad(self.vae , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
set_requires_grad(self.unet , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
set_requires_grad(self.unet , lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase ) -> List[Any]:
# get the original timestep using init_timestep
lowerCamelCase_ = min(int(num_inference_steps * strength ) , lowercase )
lowerCamelCase_ = max(num_inference_steps - init_timestep , 0 )
lowerCamelCase_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None ) -> Any:
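# Encode the init image with the VAE, scale by the Stable Diffusion latent factor, repeat per
# prompt, and add scheduler noise at the chosen start timestep.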
if not isinstance(lowercase , torch.Tensor ):
raise ValueError(f'`image` has to be of type `torch.Tensor` but is {type(lowercase )}' )
lowerCamelCase_ = image.to(device=lowercase , dtype=lowercase )
if isinstance(lowercase , lowercase ):
lowerCamelCase_ = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase )
]
lowerCamelCase_ = torch.cat(lowercase , dim=0 )
else:
lowerCamelCase_ = self.vae.encode(lowercase ).latent_dist.sample(lowercase )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowerCamelCase_ = 0.1_8_2_1_5 * init_latents
lowerCamelCase_ = init_latents.repeat_interleave(lowercase , dim=0 )
lowerCamelCase_ = randn_tensor(init_latents.shape , generator=lowercase , device=lowercase , dtype=lowercase )
# get latents
lowerCamelCase_ = self.scheduler.add_noise(lowercase , lowercase , lowercase )
lowerCamelCase_ = init_latents
return latents
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Tuple:
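# Caption the image with the CoCa model to obtain a text description usable as a prompt.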
lowerCamelCase_ = self.coca_transform(lowercase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
lowerCamelCase_ = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
lowerCamelCase_ = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> str:
lowerCamelCase_ = self.feature_extractor.preprocess(lowercase )
lowerCamelCase_ = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
lowerCamelCase_ = self.clip_model.get_image_features(lowercase )
lowerCamelCase_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowercase )
lowerCamelCase_ = image_embeddings_clip.repeat_interleave(lowercase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Optional[Any]:
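# One CLIP-guidance step: estimate the clean latents from the current noise prediction, decode and
# embed the image with CLIP, take the gradient of the spherical distance to the target embedding
# w.r.t. the latents, and use it to adjust the predicted noise (or the latents for LMS schedulers).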
lowerCamelCase_ = latents.detach().requires_grad_()
lowerCamelCase_ = self.scheduler.scale_model_input(lowercase , lowercase )
# predict the noise residual
lowerCamelCase_ = self.unet(lowercase , lowercase , encoder_hidden_states=lowercase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
lowerCamelCase_ = self.scheduler.alphas_cumprod[timestep]
lowerCamelCase_ = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCamelCase_ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
lowerCamelCase_ = torch.sqrt(lowercase )
lowerCamelCase_ = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , lowercase ):
lowerCamelCase_ = self.scheduler.sigmas[index]
lowerCamelCase_ = latents - sigma * noise_pred
else:
raise ValueError(f'scheduler type {type(self.scheduler )} not supported' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowerCamelCase_ = 1 / 0.1_8_2_1_5 * sample
lowerCamelCase_ = self.vae.decode(lowercase ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ = transforms.Resize(self.feature_extractor_size )(lowercase )
lowerCamelCase_ = self.normalize(lowercase ).to(latents.dtype )
lowerCamelCase_ = self.clip_model.get_image_features(lowercase )
lowerCamelCase_ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowercase )
lowerCamelCase_ = spherical_dist_loss(lowercase , lowercase ).mean() * clip_guidance_scale
lowerCamelCase_ = -torch.autograd.grad(lowercase , lowercase )[0]
if isinstance(self.scheduler , lowercase ):
lowerCamelCase_ = latents.detach() + grads * (sigma**2)
lowerCamelCase_ = noise_pred_original
else:
lowerCamelCase_ = noise_pred_original - torch.sqrt(lowercase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , lowercase , lowercase , lowercase = None , lowercase = None , lowercase = 512 , lowercase = 512 , lowercase = 0.6 , lowercase = 50 , lowercase = 7.5 , lowercase = 1 , lowercase = 0.0 , lowercase = 100 , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = 0.8 , lowercase = 0.1 , lowercase = 0.1 , ) -> int:
if isinstance(lowercase , lowercase ) and len(lowercase ) != batch_size:
raise ValueError(f'You have passed {batch_size} batch_size, but only {len(lowercase )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(lowercase , torch.Generator ) and batch_size > 1:
lowerCamelCase_ = [generator] + [None] * (batch_size - 1)
lowerCamelCase_ = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
lowerCamelCase_ = [x[0] for x in coca_is_none if x[1]]
lowerCamelCase_ = ", ".join(lowercase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowercase ):
raise ValueError(
f'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
f'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
lowerCamelCase_ = self.get_image_description(lowercase )
if style_prompt is None:
if len(lowercase ):
raise ValueError(
f'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
f' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
lowerCamelCase_ = self.get_image_description(lowercase )
# get prompt text embeddings for content and style
lowerCamelCase_ = self.tokenizer(
lowercase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowercase , return_tensors="pt" , )
lowerCamelCase_ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase_ = self.tokenizer(
lowercase , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowercase , return_tensors="pt" , )
lowerCamelCase_ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase_ = slerp(lowercase , lowercase , lowercase )
# duplicate text embeddings for each generation per prompt
lowerCamelCase_ = text_embeddings.repeat_interleave(lowercase , dim=0 )
# set timesteps
lowerCamelCase_ = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_offset:
lowerCamelCase_ = 1
self.scheduler.set_timesteps(lowercase , **lowercase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
lowerCamelCase_ , lowerCamelCase_ = self.get_timesteps(lowercase , lowercase , self.device )
lowerCamelCase_ = timesteps[:1].repeat(lowercase )
# Preprocess image
lowerCamelCase_ = preprocess(lowercase , lowercase , lowercase )
lowerCamelCase_ = self.prepare_latents(
lowercase , lowercase , lowercase , text_embeddings.dtype , self.device , lowercase )
lowerCamelCase_ = preprocess(lowercase , lowercase , lowercase )
lowerCamelCase_ = self.prepare_latents(
lowercase , lowercase , lowercase , text_embeddings.dtype , self.device , lowercase )
lowerCamelCase_ = slerp(lowercase , lowercase , lowercase )
if clip_guidance_scale > 0:
lowerCamelCase_ = self.get_clip_image_embeddings(lowercase , lowercase )
lowerCamelCase_ = self.get_clip_image_embeddings(lowercase , lowercase )
lowerCamelCase_ = slerp(
lowercase , lowercase , lowercase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ = content_text_input.input_ids.shape[-1]
lowerCamelCase_ = self.tokenizer([""] , padding="max_length" , max_length=lowercase , return_tensors="pt" )
lowerCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowerCamelCase_ = uncond_embeddings.repeat_interleave(lowercase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowerCamelCase_ = torch.randn(lowercase , generator=lowercase , device="cpu" , dtype=lowercase ).to(
self.device )
else:
lowerCamelCase_ = torch.randn(lowercase , generator=lowercase , device=self.device , dtype=lowercase )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
lowerCamelCase_ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ = {}
if accepts_eta:
lowerCamelCase_ = eta
# check if the scheduler accepts generator
lowerCamelCase_ = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowerCamelCase_ = generator
with self.progress_bar(total=lowercase ):
for i, t in enumerate(lowercase ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(lowercase , lowercase )
# predict the noise residual
lowerCamelCase_ = self.unet(lowercase , lowercase , encoder_hidden_states=lowercase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowerCamelCase_ = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowerCamelCase_ , lowerCamelCase_ = self.cond_fn(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCamelCase_ = 1 / 0.1_8_2_1_5 * latents
lowerCamelCase_ = self.vae.decode(lowercase ).sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(lowercase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowercase , nsfw_content_detected=lowercase )
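# --- Illustrative sketch (not part of the original pipeline) ---------------------------------
# The steps above blend content and style embeddings with a `slerp` helper defined elsewhere in
# this file. The function below is only a minimal sketch of spherical linear interpolation so
# that blending step is easier to follow; the name `_slerp_sketch` and its numerical guards are
# assumptions, not the pipeline's actual implementation.
def _slerp_sketch(t, v0, v1, eps=1e-7):
    import torch
    # normalize both embedding tensors so the angle between them is well defined
    v0_unit = v0 / (v0.norm(dim=-1, keepdim=True) + eps)
    v1_unit = v1 / (v1.norm(dim=-1, keepdim=True) + eps)
    # angle between the embeddings, clamped away from 0 and pi for numerical stability
    omega = torch.acos((v0_unit * v1_unit).sum(-1).clamp(-1 + eps, 1 - eps))
    sin_omega = torch.sin(omega)
    # classic slerp weights: sin((1 - t) * omega) / sin(omega) and sin(t * omega) / sin(omega)
    w0 = (torch.sin((1.0 - t) * omega) / sin_omega).unsqueeze(-1)
    w1 = (torch.sin(t * omega) / sin_omega).unsqueeze(-1)
    return w0 * v0 + w1 * v1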
| 19 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A ={
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 1 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCamelCase_ = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
lowerCamelCase_ = 4
lowerCamelCase_ = 4_8
lowerCamelCase_ = "pixelshuffle_aux"
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCamelCase_ = [6, 6, 6, 6]
lowerCamelCase_ = 6_0
lowerCamelCase_ = [6, 6, 6, 6]
lowerCamelCase_ = "pixelshuffledirect"
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCamelCase_ = 4
lowerCamelCase_ = "nearest+conv"
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
lowerCamelCase_ = 1
lowerCamelCase_ = 1
lowerCamelCase_ = 1_2_6
lowerCamelCase_ = 7
lowerCamelCase_ = 2_55.0
lowerCamelCase_ = ""
return config
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
if "patch_embed.proj" in name and "layers" not in name:
lowerCamelCase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowerCamelCase_ = name.replace("patch_embed.norm" , "embeddings.patch_embeddings.layernorm" )
if "layers" in name:
lowerCamelCase_ = name.replace("layers" , "encoder.stages" )
if "residual_group.blocks" in name:
lowerCamelCase_ = name.replace("residual_group.blocks" , "layers" )
if "attn.proj" in name:
lowerCamelCase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase_ = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
lowerCamelCase_ = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
lowerCamelCase_ = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
lowerCamelCase_ = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
lowerCamelCase_ = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace("patch_embed.proj" , "patch_embed.projection" )
if name == "norm.weight":
lowerCamelCase_ = "layernorm.weight"
if name == "norm.bias":
lowerCamelCase_ = "layernorm.bias"
if "conv_first" in name:
lowerCamelCase_ = name.replace("conv_first" , "first_convolution" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
lowerCamelCase_ = name.replace("conv_last" , "final_convolution" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
lowerCamelCase_ = name.replace("conv_before_upsample.0" , "conv_before_upsample" )
if "upsample.0" in name:
lowerCamelCase_ = name.replace("upsample.0" , "upsample.convolution_0" )
if "upsample.2" in name:
lowerCamelCase_ = name.replace("upsample.2" , "upsample.convolution_1" )
lowerCamelCase_ = "upsample." + name
elif config.upsampler == "pixelshuffledirect":
lowerCamelCase_ = name.replace("upsample.0.weight" , "upsample.conv.weight" )
lowerCamelCase_ = name.replace("upsample.0.bias" , "upsample.conv.bias" )
else:
pass
else:
lowerCamelCase_ = "swin2sr." + name
return name
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
for key in orig_state_dict.copy().keys():
lowerCamelCase_ = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowerCamelCase_ = key.split("." )
lowerCamelCase_ = int(key_split[1] )
lowerCamelCase_ = int(key_split[4] )
lowerCamelCase_ = config.embed_dim
if "weight" in key:
lowerCamelCase_ = val[:dim, :]
lowerCamelCase_ = val[dim : dim * 2, :]
lowerCamelCase_ = val[-dim:, :]
else:
lowerCamelCase_ = val[:dim]
lowerCamelCase_ = val[dim : dim * 2]
lowerCamelCase_ = val[-dim:]
pass
else:
lowerCamelCase_ = val
return orig_state_dict
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = get_config(lowerCamelCase__ )
lowerCamelCase_ = SwinaSRForImageSuperResolution(lowerCamelCase__ )
model.eval()
lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )
lowerCamelCase_ = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
raise ValueError("Missing keys when converting: {}".format(lowerCamelCase__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'Unexpected key {key} in state_dict' )
# verify values
lowerCamelCase_ = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
lowerCamelCase_ = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("RGB" )
lowerCamelCase_ = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
lowerCamelCase_ = 1_2_6 if "Jpeg" in checkpoint_url else 2_5_6
lowerCamelCase_ = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
lowerCamelCase_ = transforms(lowerCamelCase__ ).unsqueeze(0 )
if config.num_channels == 1:
lowerCamelCase_ = pixel_values[:, 0, :, :].unsqueeze(1 )
lowerCamelCase_ = model(lowerCamelCase__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowerCamelCase_ = torch.Size([1, 3, 5_1_2, 5_1_2] )
lowerCamelCase_ = torch.tensor(
[[-0.70_87, -0.71_38, -0.67_21], [-0.83_40, -0.80_95, -0.72_98], [-0.91_49, -0.84_14, -0.79_40]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCamelCase_ = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
lowerCamelCase_ = torch.tensor(
[[-0.77_75, -0.81_05, -0.89_33], [-0.77_64, -0.83_56, -0.92_25], [-0.79_76, -0.86_86, -0.95_79]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowerCamelCase_ = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
lowerCamelCase_ = torch.tensor(
[[-0.80_35, -0.75_04, -0.74_91], [-0.85_38, -0.81_24, -0.77_82], [-0.88_04, -0.86_51, -0.84_93]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCamelCase_ = torch.Size([1, 3, 5_1_2, 5_1_2] )
lowerCamelCase_ = torch.tensor(
[[-0.76_69, -0.86_62, -0.87_67], [-0.88_10, -0.99_62, -0.98_20], [-0.93_40, -1.03_22, -1.11_49]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCamelCase_ = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
lowerCamelCase_ = torch.tensor(
[[-0.52_38, -0.55_57, -0.63_21], [-0.60_16, -0.59_03, -0.63_91], [-0.62_44, -0.63_34, -0.68_89]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowerCamelCase__ , atol=1e-3 )
print("Looks ok!" )
lowerCamelCase_ = {
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
"swin2SR-classical-sr-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
"swin2SR-classical-sr-x4-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
"swin2SR-compressed-sr-x4-48"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
"swin2SR-lightweight-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
"swin2SR-realworld-sr-x4-64-bsrgan-psnr"
),
}
lowerCamelCase_ = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
model.push_to_hub(F'caidas/{model_name}' )
processor.push_to_hub(F'caidas/{model_name}' )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
__A =parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 19 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , ) -> Optional[int]:
lowerCamelCase_ = parent
lowerCamelCase_ = 13
lowerCamelCase_ = 7
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = 99
lowerCamelCase_ = 32
lowerCamelCase_ = 2
lowerCamelCase_ = 4
lowerCamelCase_ = 37
lowerCamelCase_ = "gelu"
lowerCamelCase_ = 0.1
lowerCamelCase_ = 0.1
lowerCamelCase_ = 512
lowerCamelCase_ = 16
lowerCamelCase_ = 2
lowerCamelCase_ = 0.0_2
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = None
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = self.prepare_config_and_inputs()
lowerCamelCase_ = True
lowerCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Any:
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCamelCase_ = True
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase , encoder_hidden_states=lowercase )
# Also check the case where encoder outputs are not passed
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
lowerCamelCase_ = TFEsmForMaskedLM(config=lowercase )
lowerCamelCase_ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFEsmForTokenClassification(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = config_and_inputs
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = TFEsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFEsmModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
pass
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
pass
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(lowercase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCamelCase_ = model.get_bias()
assert isinstance(lowercase , lowercase )
for k, v in name.items():
assert isinstance(lowercase , tf.Variable )
else:
lowerCamelCase_ = model.get_output_embeddings()
assert x is None
lowerCamelCase_ = model.get_bias()
assert name is None
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(lowercase )[0]
lowerCamelCase_ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , lowercase )
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(lowercase )[0]
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 19 | 1 |
from math import factorial
def lowerCamelCase_ ( lowerCamelCase__ = 1_0_0 ):
    return sum(map(int , str(factorial(lowerCamelCase__ ) ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
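# Worked example (illustrative addition, not part of the original solution): for n = 10,
# factorial(10) == 3628800 and the digit sum 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27 is exactly what
# the function above returns; the default n = 100 answers Project Euler problem 20.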
| 19 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( ):
# Get the sagemaker specific mp parameters from smp_options variable.
lowerCamelCase_ = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
lowerCamelCase_ = json.loads(lowerCamelCase__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
lowerCamelCase_ = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
lowerCamelCase_ = json.loads(lowerCamelCase__ )
if not mpi_options.get("sagemaker_mpi_enabled" , lowerCamelCase__ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = field(
default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , lowercase , )
@cached_property
def SCREAMING_SNAKE_CASE_( self ) -> "torch.device":
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
lowerCamelCase_ = torch.device("cpu" )
lowerCamelCase_ = 0
elif is_sagemaker_model_parallel_available():
lowerCamelCase_ = smp.local_rank()
lowerCamelCase_ = torch.device("cuda" , lowercase )
lowerCamelCase_ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
lowerCamelCase_ = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
lowerCamelCase_ = torch.device("cuda" , self.local_rank )
lowerCamelCase_ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
lowerCamelCase_ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
lowerCamelCase_ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
lowerCamelCase_ = torch.device("cuda" , self.local_rank )
lowerCamelCase_ = 1
if device.type == "cuda":
torch.cuda.set_device(lowercase )
return device
@property
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
return not is_sagemaker_model_parallel_available()
@property
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return False
| 19 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
        # For consistency across the different places where the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only with integers.
lowerCamelCase_ = [[1, 2, 4], [1, 2, 3, 4]]
lowerCamelCase_ = DisjunctiveConstraint(lowercase )
self.assertTrue(isinstance(dc.token_ids , lowercase ) )
with self.assertRaises(lowercase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowercase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
lowerCamelCase_ = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowercase ):
DisjunctiveConstraint(lowercase ) # fails here
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = [[1, 2, 3], [1, 2, 4]]
lowerCamelCase_ = DisjunctiveConstraint(lowercase )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(1 )
lowerCamelCase_ = stepped is True and completed is False and reset is False
self.assertTrue(lowercase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(2 )
lowerCamelCase_ = stepped is True and completed is False and reset is False
self.assertTrue(lowercase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(3 )
lowerCamelCase_ = stepped is True and completed is True and reset is False
self.assertTrue(lowercase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowerCamelCase_ = DisjunctiveConstraint(lowercase )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
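# Illustrative usage sketch (assumption: not part of the original test file). It replays, outside
# of unittest, the same API the tests above exercise: `update` returns a (stepped, completed,
# reset) triple, and the constraint completes once one full alternative of the disjunction has
# been generated.
if __name__ == "__main__":
    if is_torch_available():
        constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
        for token in (1, 2, 4):
            stepped, completed, reset = constraint.update(token)
        print("completed:", completed, "generated:", constraint.current_seq)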
| 19 |
import math
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 ):
lowerCamelCase_ = end or len(lowerCamelCase__ )
for i in range(lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = i
lowerCamelCase_ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
lowerCamelCase_ = array[temp_index - 1]
temp_index -= 1
lowerCamelCase_ = temp_index_value
return array
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): # Max Heap
lowerCamelCase_ = index
lowerCamelCase_ = 2 * index + 1 # Left Node
lowerCamelCase_ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
lowerCamelCase_ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
lowerCamelCase_ = right_index
if largest != index:
lowerCamelCase_ , lowerCamelCase_ = array[largest], array[index]
heapify(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = len(lowerCamelCase__ )
for i in range(n // 2 , -1 , -1 ):
heapify(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for i in range(n - 1 , 0 , -1 ):
lowerCamelCase_ , lowerCamelCase_ = array[0], array[i]
heapify(lowerCamelCase__ , 0 , lowerCamelCase__ )
return array
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = low
lowerCamelCase_ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
lowerCamelCase_ , lowerCamelCase_ = array[j], array[i]
i += 1
def lowerCamelCase_ ( lowerCamelCase__ ):
if len(lowerCamelCase__ ) == 0:
return array
    lowerCamelCase_ = 2 * math.ceil(math.log2(len(lowerCamelCase__ ) ) )
lowerCamelCase_ = 1_6
return intro_sort(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ , lowerCamelCase__ )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(lowerCamelCase__ )
max_depth -= 1
lowerCamelCase_ = median_of_a(lowerCamelCase__ , lowerCamelCase__ , start + ((end - start) // 2) + 1 , end - 1 )
lowerCamelCase_ = partition(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
intro_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = p
return insertion_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
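# Illustrative note (not part of the original module): introsort switches strategies using the two
# thresholds computed by the wrapper above (called as `sort` in the __main__ block below) -- runs
# shorter than the size threshold of 16 are finished with insertion sort, and once the recursion
# depth budget of 2 * ceil(log2(n)) is exhausted the remaining run falls back to heap sort.
# The helper name below is an assumption, added only as a self-check sketch.
def _introsort_smoke_test():
    # For n = 8 the depth budget is 2 * ceil(log2(8)) = 6, but the whole run is already below the
    # size threshold, so a single insertion-sort pass finishes the job.
    sample = [91, -2, 17, 0, 5, 5, -33, 42]
    return sort(sample) == sorted(sample)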
if __name__ == "__main__":
import doctest
doctest.testmod()
__A =input('''Enter numbers separated by a comma : ''').strip()
__A =[float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 19 | 1 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
__A =re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
__A =1_0
__A =2_5_6
def lowerCamelCase_ ( lowerCamelCase__ ):
if len(lowerCamelCase__ ) < MIN_NUM_TOKENS:
return None
lowerCamelCase_ = MinHash(num_perm=lowerCamelCase__ )
for token in set(lowerCamelCase__ ):
min_hash.update(token.encode() )
return min_hash
def lowerCamelCase_ ( lowerCamelCase__ ):
return {t for t in NON_ALPHA.split(lowerCamelCase__ ) if len(t.strip() ) > 0}
class _SCREAMING_SNAKE_CASE :
def __init__( self , *,
lowercase = 0.8_5 , ) -> int:
lowerCamelCase_ = duplication_jaccard_threshold
lowerCamelCase_ = NUM_PERM
lowerCamelCase_ = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
lowerCamelCase_ = defaultdict(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> None:
lowerCamelCase_ = self._index.query(lowercase )
if code_key in self._index.keys:
print(f'Duplicate key {code_key}' )
return
self._index.insert(lowercase , lowercase )
if len(lowercase ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowercase )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[List[Dict]]:
lowerCamelCase_ = []
for base, duplicates in self._duplicate_clusters.items():
lowerCamelCase_ = [base] + list(lowercase )
# reformat the cluster to be a list of dict
lowerCamelCase_ = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
duplicate_clusters.append(lowercase )
return duplicate_clusters
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.get_duplicate_clusters()
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase )
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ , lowerCamelCase_ = element
lowerCamelCase_ = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def lowerCamelCase_ ( lowerCamelCase__ ):
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(lowerCamelCase__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = DuplicationIndex(duplication_jaccard_threshold=lowerCamelCase__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCamelCase__ ) ) , max_queue_size=1_0_0 ) ):
di.add(lowerCamelCase__ , lowerCamelCase__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = get_tokens(lowerCamelCase__ )
lowerCamelCase_ = get_tokens(lowerCamelCase__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
__A =None
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = []
for elementa in cluster:
lowerCamelCase_ = _shared_dataset[elementa["base_index"]]["content"]
for elementa in extremes:
lowerCamelCase_ = _shared_dataset[elementa["base_index"]]["content"]
if jaccard_similarity(lowerCamelCase__ , lowerCamelCase__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
lowerCamelCase_ = 1
extremes.append(lowerCamelCase__ )
return extremes
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
global _shared_dataset
lowerCamelCase_ = dataset
lowerCamelCase_ = []
lowerCamelCase_ = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCamelCase__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
lowerCamelCase__ , lowerCamelCase__ , ) , total=len(lowerCamelCase__ ) , ):
extremes_list.append(lowerCamelCase__ )
return extremes_list
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 0.85 ):
lowerCamelCase_ = make_duplicate_clusters(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
lowerCamelCase_ = {}
lowerCamelCase_ = find_extremes(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for extremes in extremes_clusters:
for element in extremes:
lowerCamelCase_ = element
lowerCamelCase_ = duplicate_indices - set(extreme_dict.keys() )
lowerCamelCase_ = dataset.filter(lambda lowerCamelCase__ , lowerCamelCase__ : idx not in remove_indices , with_indices=lowerCamelCase__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowerCamelCase_ = element["base_index"] in extreme_dict
if element["is_extreme"]:
lowerCamelCase_ = extreme_dict[element["base_index"]]["copies"]
print(F'Original dataset size: {len(lowerCamelCase__ )}' )
print(F'Number of duplicate clusters: {len(lowerCamelCase__ )}' )
print(F'Files in duplicate cluster: {len(lowerCamelCase__ )}' )
print(F'Unique files in duplicate cluster: {len(lowerCamelCase__ )}' )
print(F'Filtered dataset size: {len(lowerCamelCase__ )}' )
return ds_filter, duplicate_clusters
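# Illustrative sketch (assumption: not part of the original module). It shows how the MinHash
# signatures produced by `get_min_hash` approximate the exact token-level Jaccard similarity
# computed by `jaccard_similarity`, which is the comparison the extreme-finding step relies on.
if __name__ == "__main__":
    snippet_a = "def mean(values):\n    total = sum(values)\n    count = len(values)\n    return total / count"
    snippet_b = "def mean(numbers):\n    total = sum(numbers)\n    count = len(numbers)\n    return total / count"
    exact = jaccard_similarity(snippet_a, snippet_b)
    minhash_a = get_min_hash([t for t in NON_ALPHA.split(snippet_a) if len(t.strip()) > 0])
    minhash_b = get_min_hash([t for t in NON_ALPHA.split(snippet_b) if len(t.strip()) > 0])
    print(f"exact Jaccard: {exact:.2f}, MinHash estimate: {minhash_a.jaccard(minhash_b):.2f}")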
| 19 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , *lowercase , lowercase=None , lowercase=None , **lowercase ) -> List[str]:
super().__init__(*lowercase , **lowercase )
lowerCamelCase_ = eval_examples
lowerCamelCase_ = post_process_function
def SCREAMING_SNAKE_CASE_( self , lowercase = None , lowercase=None , lowercase = None , lowercase = "eval" , **lowercase , ) -> Dict[str, float]:
lowerCamelCase_ = gen_kwargs.copy()
lowerCamelCase_ = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
lowerCamelCase_ = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
lowerCamelCase_ = gen_kwargs
lowerCamelCase_ = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCamelCase_ = self.get_eval_dataloader(lowercase )
lowerCamelCase_ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = time.time()
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase_ = eval_loop(
lowercase , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowerCamelCase_ = self.post_process_function(lowercase , lowercase , lowercase )
lowerCamelCase_ = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase_ = metrics.pop(lowercase )
metrics.update(output.metrics )
else:
lowerCamelCase_ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCamelCase_ = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase )
return metrics
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=None , lowercase = "test" , **lowercase ) -> Union[str, Any]:
lowerCamelCase_ = gen_kwargs.copy()
lowerCamelCase_ = self.get_test_dataloader(lowercase )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = time.time()
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase_ = eval_loop(
lowercase , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCamelCase_ = self.post_process_function(lowercase , lowercase , lowercase , "predict" )
lowerCamelCase_ = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase_ = metrics.pop(lowercase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase )
| 19 | 1 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
__A =logging.get_logger(__name__)
__A =list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__A =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Model type selected in the list: ' + ', '.join(snake_case_ )} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase__ = field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase__ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase__ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase__ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'train'
lowerCAmelCase__ = 'dev'
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , lowercase , lowercase , lowercase = None , lowercase = Split.train , lowercase = False , lowercase = None , lowercase = "pt" , ) -> List[str]:
lowerCamelCase_ = args
lowerCamelCase_ = is_language_sensitive
lowerCamelCase_ = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowercase , lowercase ):
try:
lowerCamelCase_ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowerCamelCase_ = mode
# Load data features from cache or dataset file
lowerCamelCase_ = "v2" if args.version_2_with_negative else "v1"
lowerCamelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ = cached_features_file + ".lock"
with FileLock(lowercase ):
if os.path.exists(lowercase ) and not args.overwrite_cache:
lowerCamelCase_ = time.time()
lowerCamelCase_ = torch.load(lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ = self.old_features["features"]
lowerCamelCase_ = self.old_features.get("dataset" , lowercase )
lowerCamelCase_ = self.old_features.get("examples" , lowercase )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
lowerCamelCase_ = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ = self.processor.get_train_examples(args.data_dir )
lowerCamelCase_ , lowerCamelCase_ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=lowercase , )
lowerCamelCase_ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self , lowercase ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
lowerCamelCase_ = self.features[i]
lowerCamelCase_ = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase_ = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ = torch.tensor(feature.start_position , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 19 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
__A ='''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
super().__init__()
self.register_modules(
prior=lowercase , image_encoder=lowercase , image_processor=lowercase , scheduler=lowercase , renderer=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if latents is None:
lowerCamelCase_ = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase_ = latents.to(lowercase )
lowerCamelCase_ = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE_( self , lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device(f'cuda:{gpu_id}' )
lowerCamelCase_ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
if isinstance(lowercase , lowercase ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase_ = torch.cat(lowercase , axis=0 ) if image[0].ndim == 4 else torch.stack(lowercase , axis=0 )
if not isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = self.image_processor(lowercase , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
lowerCamelCase_ = image.to(dtype=self.image_encoder.dtype , device=lowercase )
lowerCamelCase_ = self.image_encoder(lowercase )["last_hidden_state"]
lowerCamelCase_ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase_ = image_embeds.repeat_interleave(lowercase , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_ = torch.zeros_like(lowercase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowercase )
def __call__( self , lowercase , lowercase = 1 , lowercase = 25 , lowercase = None , lowercase = None , lowercase = 4.0 , lowercase = 64 , lowercase = "pil" , lowercase = True , ) -> Union[str, Any]:
if isinstance(lowercase , PIL.Image.Image ):
lowerCamelCase_ = 1
elif isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = image.shape[0]
elif isinstance(lowercase , lowercase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase_ = len(lowercase )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowercase )}' )
lowerCamelCase_ = self._execution_device
lowerCamelCase_ = batch_size * num_images_per_prompt
lowerCamelCase_ = guidance_scale > 1.0
lowerCamelCase_ = self._encode_image(lowercase , lowercase , lowercase , lowercase )
# prior
self.scheduler.set_timesteps(lowercase , device=lowercase )
lowerCamelCase_ = self.scheduler.timesteps
lowerCamelCase_ = self.prior.config.num_embeddings
lowerCamelCase_ = self.prior.config.embedding_dim
lowerCamelCase_ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowercase , lowercase , lowercase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase_ = latents.reshape(latents.shape[0] , lowercase , lowercase )
for i, t in enumerate(self.progress_bar(lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(lowercase , lowercase )
lowerCamelCase_ = self.prior(
lowercase , timestep=lowercase , proj_embedding=lowercase , ).predicted_image_embedding
# remove the variance
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase_ = self.scheduler.step(
lowercase , timestep=lowercase , sample=lowercase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowercase )
lowerCamelCase_ = []
for i, latent in enumerate(lowercase ):
lowerCamelCase_ = self.renderer.decode(
latent[None, :] , lowercase , size=lowercase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(lowercase )
lowerCamelCase_ = torch.stack(lowercase )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowerCamelCase_ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase_ = [self.numpy_to_pil(lowercase ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowercase )
| 19 | 1 |
from cva import destroyAllWindows, imread, imshow, waitKey
def lowerCamelCase_ ( lowerCamelCase__ ):
# getting number of pixels in the image
lowerCamelCase_ , lowerCamelCase_ = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
lowerCamelCase_ = [2_5_5, 2_5_5, 2_5_5] - img[i][j]
return img
if __name__ == "__main__":
# read original image
__A =imread('''image_data/lena.jpg''', 1)
# convert to its negative
__A =convert_to_negative(img)
# show result image
imshow('''negative of original image''', img)
waitKey(0)
destroyAllWindows()
| 19 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
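# Build the example graph as an adjacency list and check that every expected
# minimum-spanning-tree edge (or its reverse orientation) appears in the
# result returned by Prim's algorithm.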
def lowerCamelCase_ ( ):
lowerCamelCase_ , lowerCamelCase_ = 9, 1_4 # noqa: F841
lowerCamelCase_ = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
lowerCamelCase_ = defaultdict(lowerCamelCase__ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
lowerCamelCase_ = mst(lowerCamelCase__ )
lowerCamelCase_ = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
lowerCamelCase_ = tuple(answer[:2] )
lowerCamelCase_ = tuple(edge[::-1] )
assert edge in result or reverse in result
| 19 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A ={
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
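# A minimal way to launch a tracked run (the script filename below is
# illustrative, not part of the repository):
#   accelerate config        # answer the hardware prompts once per machine
#   accelerate launch this_tracking_example.py --with_tracking
#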
########################################################################
__A =1_6
__A =3_2
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 1_6 ):
lowerCamelCase_ = AutoTokenizer.from_pretrained("bert-base-cased" )
lowerCamelCase_ = load_dataset("glue" , "mrpc" )
def tokenize_function(lowerCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ = datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowerCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ = 1_6
elif accelerator.mixed_precision != "no":
lowerCamelCase_ = 8
else:
lowerCamelCase_ = None
return tokenizer.pad(
lowerCamelCase__ , padding="longest" , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors="pt" , )
# Instantiate dataloaders.
lowerCamelCase_ = DataLoader(
tokenized_datasets["train"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
lowerCamelCase_ = DataLoader(
tokenized_datasets["validation"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__A =mocked_dataloaders # noqa: F811
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCamelCase__ ) == "1":
lowerCamelCase_ = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowerCamelCase_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
lowerCamelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ = config["lr"]
lowerCamelCase_ = int(config["num_epochs"] )
lowerCamelCase_ = int(config["seed"] )
lowerCamelCase_ = int(config["batch_size"] )
set_seed(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
lowerCamelCase_ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase_ = batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase_ = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ = model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ = AdamW(params=model.parameters() , lr=lowerCamelCase__ )
# Instantiate scheduler
lowerCamelCase_ = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowerCamelCase_ = os.path.split(lowerCamelCase__ )[-1].split("." )[0]
accelerator.init_trackers(lowerCamelCase__ , lowerCamelCase__ )
# Now we train the model
for epoch in range(lowerCamelCase__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowerCamelCase_ = 0
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowerCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.logits.argmax(dim=-1 )
lowerCamelCase_ , lowerCamelCase_ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowerCamelCase__ , references=lowerCamelCase__ , )
lowerCamelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(lowerCamelCase__ ),
"epoch": epoch,
} , step=lowerCamelCase__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase_ ( ):
lowerCamelCase_ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=lowerCamelCase__ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = {"lr": 2e-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
main()
| 19 | 1 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def lowerCamelCase_ ( lowerCamelCase__ ):
return (data["data"], data["target"])
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = XGBClassifier()
classifier.fit(lowerCamelCase__ , lowerCamelCase__ )
return classifier
def lowerCamelCase_ ( ):
lowerCamelCase_ = load_iris()
lowerCamelCase_ , lowerCamelCase_ = data_handling(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = train_test_split(
lowerCamelCase__ , lowerCamelCase__ , test_size=0.25 )
lowerCamelCase_ = iris["target_names"]
# Create an XGBoost Classifier from the training data
lowerCamelCase_ = xgboost(lowerCamelCase__ , lowerCamelCase__ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , display_labels=lowerCamelCase__ , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 19 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__A =None
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__A ={
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
__A ={
'''facebook/mbart-large-en-ro''': 1_0_2_4,
'''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
__A =['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = MBartTokenizer
lowerCAmelCase__ = []
lowerCAmelCase__ = []
def __init__( self , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=None , lowercase=None , lowercase=None , **lowercase , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
super().__init__(
vocab_file=lowercase , tokenizer_file=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , src_lang=lowercase , tgt_lang=lowercase , additional_special_tokens=lowercase , **lowercase , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = False if not self.vocab_file else True
lowerCamelCase_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
lowerCamelCase_ = {
lang_code: self.convert_tokens_to_ids(lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase_ = src_lang if src_lang is not None else "en_XX"
lowerCamelCase_ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE_( self ) -> str:
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , **lowercase ) -> List[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase_ = src_lang
lowerCamelCase_ = self(lowercase , add_special_tokens=lowercase , return_tensors=lowercase , **lowercase )
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = "en_XX" , lowercase = None , lowercase = "ro_RO" , **lowercase , ) -> BatchEncoding:
lowerCamelCase_ = src_lang
lowerCamelCase_ = tgt_lang
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase_ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,)
| 19 | 1 |
from PIL import Image
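# Threshold a greyscale PIL image against its mean brightness: pixels brighter
# than the mean become white (255), everything else becomes black (0).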
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ , lowerCamelCase_ = image.size
lowerCamelCase_ = 0
lowerCamelCase_ = image.load()
for i in range(lowerCamelCase__ ):
for j in range(lowerCamelCase__ ):
lowerCamelCase_ = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(lowerCamelCase__ ):
for i in range(lowerCamelCase__ ):
lowerCamelCase_ = 2_5_5 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
__A =mean_threshold(Image.open('''path_to_image''').convert('''L'''))
image.save('''output_image_path''')
| 19 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__A =pytest.mark.integration
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(lowercase ) for x in np.arange(30 ).tolist()]} )
return dset
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
lowerCamelCase_ = dset.map(
lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowercase , keep_in_memory=lowercase )
lowerCamelCase_ = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(lowercase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
from elasticsearch import Elasticsearch
lowerCamelCase_ = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
lowerCamelCase_ = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=lowercase )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertRaises(lowercase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase_ = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
self.assertRaises(lowercase , index.search_batch , queries[0] )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
import faiss
lowerCamelCase_ = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase_ = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowercase ):
lowerCamelCase_ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = faiss.IndexFlat(5 )
lowerCamelCase_ = FaissIndex(custom_index=lowercase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase_ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase_ ( lowerCamelCase__ ):
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase_ = "index.faiss"
lowerCamelCase_ = F'mock://{index_name}'
index.save(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = FaissIndex.load(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowerCamelCase__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = Elasticsearch()
lowerCamelCase_ = {"acknowledged": True}
lowerCamelCase_ = ElasticSearchIndex(es_client=lowercase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
# batched queries with timeout
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase , request_timeout=30 )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
| 19 | 1 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def lowerCamelCase_ ( lowerCamelCase__ ):
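    # Pair the leftover CLI tokens as ("--key", "value") couples and build a dict,
    # stripping the leading dashes from each key.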
return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def lowerCamelCase_ ( ):
lowerCamelCase_ = ArgumentParser(
"HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=lowerCamelCase__ )
lowerCamelCase_ = parser.add_subparsers(help="datasets-cli command helpers" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase__ )
EnvironmentCommand.register_subcommand(lowerCamelCase__ )
TestCommand.register_subcommand(lowerCamelCase__ )
RunBeamCommand.register_subcommand(lowerCamelCase__ )
DummyDataCommand.register_subcommand(lowerCamelCase__ )
# Parse args
lowerCamelCase_ , lowerCamelCase_ = parser.parse_known_args()
if not hasattr(lowerCamelCase__ , "func" ):
parser.print_help()
exit(1 )
lowerCamelCase_ = parse_unknown_args(lowerCamelCase__ )
# Run
lowerCamelCase_ = args.func(lowerCamelCase__ , **lowerCamelCase__ )
service.run()
if __name__ == "__main__":
main()
| 19 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> List[str]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
lowerCamelCase_ = self.vocab_size - 1
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , head_mask=lowercase )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = OpenAIGPTLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTDoubleHeadsModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = OpenAIGPTForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.prepare_config_and_inputs()
        lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCAmelCase__ = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=False ) -> Any:
lowerCamelCase_ = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase , )
lowerCamelCase_ = inputs_dict["labels"]
lowerCamelCase_ = inputs_dict["labels"]
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase , )
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = OpenAIGPTModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , n_embd=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Any:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = OpenAIGPTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(lowercase )
lowerCamelCase_ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase ) # the president is
lowerCamelCase_ = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase_ = model.generate(lowercase , do_sample=lowercase )
self.assertListEqual(output_ids[0].tolist() , lowercase )
| 19 | 1 |
def lowerCamelCase_ ( lowerCamelCase__ ):
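    # Classic "climbing stairs" count: with moves of one or two steps, the number
    # of distinct ways to reach step n follows a Fibonacci-style recurrence.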
assert (
isinstance(lowerCamelCase__ , lowerCamelCase__ ) and number_of_steps > 0
), F'number_of_steps needs to be positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
lowerCamelCase_ , lowerCamelCase_ = 1, 1
for _ in range(number_of_steps - 1 ):
lowerCamelCase_ , lowerCamelCase_ = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 19 |
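# Precompute the fifth power of every decimal digit, then (in the style of
# Project Euler problem 30) sum all numbers that equal the sum of the fifth
# powers of their own digits.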
__A ={str(digit): digit**5 for digit in range(1_0)}
def lowerCamelCase_ ( lowerCamelCase__ ):
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(lowerCamelCase__ ) )
def lowerCamelCase_ ( ):
return sum(
number
for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
if number == digits_fifth_powers_sum(lowerCamelCase__ ) )
if __name__ == "__main__":
print(solution())
| 19 | 1 |
import random
def lowerCamelCase_ ( lowerCamelCase__ ):
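    # Miller-Rabin probabilistic primality test: write num - 1 as 2**t * s with
    # s odd, then try five random witnesses; composites are detected with high
    # probability, primes are never rejected.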
lowerCamelCase_ = num - 1
lowerCamelCase_ = 0
while s % 2 == 0:
lowerCamelCase_ = s // 2
t += 1
for _ in range(5 ):
lowerCamelCase_ = random.randrange(2 , num - 1 )
lowerCamelCase_ = pow(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if v != 1:
lowerCamelCase_ = 0
while v != (num - 1):
if i == t - 1:
return False
else:
lowerCamelCase_ = i + 1
lowerCamelCase_ = (v**2) % num
return True
def lowerCamelCase_ ( lowerCamelCase__ ):
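    # Cheap pre-checks: reject values below 2, accept members of a small-prime
    # table, reject multiples of those primes, and only fall back to the
    # Miller-Rabin test for the remaining candidates.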
if num < 2:
return False
lowerCamelCase_ = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(lowerCamelCase__ )
def lowerCamelCase_ ( lowerCamelCase__ = 1_0_2_4 ):
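    # Keep drawing random keysize-bit candidates until one passes the primality check.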
while True:
lowerCamelCase_ = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(lowerCamelCase__ ):
return num
if __name__ == "__main__":
__A =generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 19 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase_ = 1_9_2
lowerCamelCase_ = 7_6_8
lowerCamelCase_ = 1_2
lowerCamelCase_ = 3
lowerCamelCase_ = [8_0_0, 1_3_3_3]
lowerCamelCase_ = False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = 3_3_0
lowerCamelCase_ = 1_4
lowerCamelCase_ = 6
lowerCamelCase_ = 1_3_2_0
elif "yolos_s" in yolos_name:
lowerCamelCase_ = 3_8_4
lowerCamelCase_ = 1_5_3_6
lowerCamelCase_ = 1_2
lowerCamelCase_ = 6
elif "yolos_b" in yolos_name:
lowerCamelCase_ = [8_0_0, 1_3_4_4]
lowerCamelCase_ = 9_1
lowerCamelCase_ = "huggingface/label-files"
lowerCamelCase_ = "coco-detection-id2label.json"
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowerCamelCase_ = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase_ = in_proj_bias[: config.hidden_size]
lowerCamelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase_ = in_proj_weight[-config.hidden_size :, :]
lowerCamelCase_ = in_proj_bias[-config.hidden_size :]
def lowerCamelCase_ ( lowerCamelCase__ ):
if "backbone" in name:
lowerCamelCase_ = name.replace("backbone" , "vit" )
if "cls_token" in name:
lowerCamelCase_ = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowerCamelCase_ = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowerCamelCase_ = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowerCamelCase_ = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowerCamelCase_ = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCamelCase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase_ = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowerCamelCase_ = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowerCamelCase_ = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowerCamelCase_ = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
for key in orig_state_dict.copy().keys():
lowerCamelCase_ = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowerCamelCase_ = key.split("." )
lowerCamelCase_ = int(key_split[2] )
lowerCamelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase_ = val[:dim, :]
lowerCamelCase_ = val[
dim : dim * 2, :
]
lowerCamelCase_ = val[-dim:, :]
else:
lowerCamelCase_ = val[:dim]
lowerCamelCase_ = val[dim : dim * 2]
lowerCamelCase_ = val[-dim:]
else:
lowerCamelCase_ = val
return orig_state_dict
def lowerCamelCase_ ( ):
lowerCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase_ = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
lowerCamelCase_ = get_yolos_config(lowerCamelCase__ )
# load original state_dict
lowerCamelCase_ = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]
# load 🤗 model
lowerCamelCase_ = YolosForObjectDetection(lowerCamelCase__ )
model.eval()
lowerCamelCase_ = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase_ = 8_0_0 if yolos_name != "yolos_ti" else 5_1_2
lowerCamelCase_ = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ )
lowerCamelCase_ = image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = outputs.logits, outputs.pred_boxes
lowerCamelCase_ , lowerCamelCase_ = None, None
if yolos_name == "yolos_ti":
lowerCamelCase_ = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
lowerCamelCase_ = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase_ = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
lowerCamelCase_ = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase_ = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
lowerCamelCase_ = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
lowerCamelCase_ = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
lowerCamelCase_ = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
lowerCamelCase_ = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
lowerCamelCase_ = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCamelCase_ = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" )
model.push_to_hub(lowerCamelCase__ , organization="hustvl" )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__A =parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 19 | 1 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[Any]:
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = 1
lowerCamelCase_ = FrozenDict(lowercase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = True
lowerCamelCase_ = FrozenDict(lowercase )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=lowercase , segmentation_processor=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
self.enable_attention_slicing(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , lowercase , lowercase , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> int:
lowerCamelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowerCamelCase_ = self.segmentation_model(**lowercase )
lowerCamelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowerCamelCase_ = self.numpy_to_pil(lowercase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowerCamelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowercase , image=lowercase , mask_image=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , )
| 19 |
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = [0 for i in range(r + 1 )]
# nc0 = 1
lowerCamelCase_ = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
lowerCamelCase_ = min(lowerCamelCase__ , lowerCamelCase__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=1_0, r=5))
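# A self-contained, runnable sketch of the same single-row Pascal's-triangle update
# used above, with explicit names (the names are illustrative, not taken from the
# original): C(i, j) = C(i-1, j) + C(i-1, j-1), updated right to left so the previous
# row is still intact when it is read.
def binomial_coefficient_sketch(n: int, r: int) -> int:
    row = [1] + [0] * r  # row[j] holds C(i, j) for the current row i
    for i in range(1, n + 1):
        for j in range(min(i, r), 0, -1):
            row[j] += row[j - 1]
    return row[r]


assert binomial_coefficient_sketch(10, 5) == 252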
| 19 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__A ={
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
__A ={'''mobilebert-uncased''': 5_1_2}
__A ={}
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = MobileBertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Union[str, Any]:
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowercase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowercase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowercase ) != tokenize_chinese_chars
):
lowerCamelCase_ = getattr(lowercase , normalizer_state.pop("type" ) )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = strip_accents
lowerCamelCase_ = tokenize_chinese_chars
lowerCamelCase_ = normalizer_class(**lowercase )
lowerCamelCase_ = do_lower_case
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=None ) -> str:
lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
lowerCamelCase_ = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
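# Usage sketch, kept as a comment because it downloads the checkpoint named in the
# pretrained map above:
#
#   from transformers import MobileBertTokenizerFast
#   tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#   enc = tok("hello world", return_tensors="pt")
#   # enc["input_ids"] includes the [CLS] ... [SEP] special tokens built above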
| 19 |
import math
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
if 0 not in (x, y):
    # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
return y * math.logaa(lowerCamelCase__ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
__A ='''Enter the base and the power separated by a comma: '''
__A, __A =map(int, input(prompt).split(''','''))
__A, __A =map(int, input(prompt).split(''','''))
# We find the log of each number, using the function res(), which takes two
# arguments.
__A =res(xa, ya)
__A =res(xa, ya)
# We check for the largest number
if resa > resa:
print('''Largest number is''', xa, '''^''', ya)
elif resa > resa:
print('''Largest number is''', xa, '''^''', ya)
else:
print('''Both are equal''')
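# A runnable sketch of the same idea with explicit names (all illustrative):
# compare x1**y1 with x2**y2 by comparing y*log10(x), so the potentially huge
# powers never have to be computed.
def log_magnitude(x: int, y: int) -> float:
    if y == 0:
        return 0.0  # x**0 == 1 and log10(1) == 0
    if x == 0:
        return float("-inf")  # 0**y == 0 for positive y
    return y * math.log10(x)


assert log_magnitude(2, 10) > log_magnitude(3, 6)  # 1024 > 729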
| 19 | 1 |
__A =[
'''Audio''',
'''Array2D''',
'''Array3D''',
'''Array4D''',
'''Array5D''',
'''ClassLabel''',
'''Features''',
'''Sequence''',
'''Value''',
'''Image''',
'''Translation''',
'''TranslationVariableLanguages''',
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 19 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
__A =logging.get_logger(__name__)
__A =list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__A =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Model type selected in the list: ' + ', '.join(snake_case_ )} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase__ = field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase__ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase__ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase__ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'train'
lowerCAmelCase__ = 'dev'
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , lowercase , lowercase , lowercase = None , lowercase = Split.train , lowercase = False , lowercase = None , lowercase = "pt" , ) -> List[str]:
lowerCamelCase_ = args
lowerCamelCase_ = is_language_sensitive
lowerCamelCase_ = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowercase , lowercase ):
try:
lowerCamelCase_ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowerCamelCase_ = mode
# Load data features from cache or dataset file
lowerCamelCase_ = "v2" if args.version_2_with_negative else "v1"
lowerCamelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ = cached_features_file + ".lock"
with FileLock(lowercase ):
if os.path.exists(lowercase ) and not args.overwrite_cache:
lowerCamelCase_ = time.time()
lowerCamelCase_ = torch.load(lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ = self.old_features["features"]
lowerCamelCase_ = self.old_features.get("dataset" , lowercase )
lowerCamelCase_ = self.old_features.get("examples" , lowercase )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
lowerCamelCase_ = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ = self.processor.get_train_examples(args.data_dir )
lowerCamelCase_ , lowerCamelCase_ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=lowercase , )
lowerCamelCase_ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self , lowercase ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
lowerCamelCase_ = self.features[i]
lowerCamelCase_ = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase_ = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ = torch.tensor(feature.start_position , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
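# Usage sketch, kept as a comment; the class and argument names below
# (SquadDataTrainingArguments, SquadDataset) are assumptions, since the original
# names are not shown in this file.
#
#   from transformers import AutoTokenizer
#   args = SquadDataTrainingArguments(data_dir="./squad", max_seq_length=384)
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   item = train_dataset[0]  # dict of input_ids / attention_mask / token_type_ids ...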
| 19 | 1 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__A =datasets.utils.logging.get_logger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ):
lowerCAmelCase__ = 1_00_00
lowerCAmelCase__ = None
lowerCAmelCase__ = None
class _SCREAMING_SNAKE_CASE ( datasets.ArrowBasedBuilder ):
lowerCAmelCase__ = ParquetConfig
def SCREAMING_SNAKE_CASE_( self ) -> Any:
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Tuple:
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
lowerCamelCase_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowercase , (str, list, tuple) ):
lowerCamelCase_ = data_files
if isinstance(lowercase , lowercase ):
lowerCamelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCamelCase_ = [dl_manager.iter_files(lowercase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
lowerCamelCase_ = []
for split_name, files in data_files.items():
if isinstance(lowercase , lowercase ):
lowerCamelCase_ = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCamelCase_ = [dl_manager.iter_files(lowercase ) for file in files]
      # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(lowercase ):
with open(lowercase , "rb" ) as f:
lowerCamelCase_ = datasets.Features.from_arrow_schema(pq.read_schema(lowercase ) )
break
splits.append(datasets.SplitGenerator(name=lowercase , gen_kwargs={"files": files} ) )
return splits
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCamelCase_ = table_cast(lowercase , self.info.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Dict:
lowerCamelCase_ = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(lowercase ) ):
with open(lowercase , "rb" ) as f:
lowerCamelCase_ = pq.ParquetFile(lowercase )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
lowerCamelCase_ = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f'{file_idx}_{batch_idx}', self._cast_table(lowercase )
except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(lowercase )}: {e}' )
raise
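# Usage sketch: this builder is what backs loading local Parquet files through the
# generic "parquet" loader, e.g. (the file paths are illustrative):
#
#   import datasets
#   ds = datasets.load_dataset("parquet", data_files={"train": "data/train.parquet"})
#   print(ds["train"].features)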
| 19 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE_( lowercase ) -> int:
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
raise NotImplementedError()
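# A minimal sketch of how a concrete subcommand could plug into this ABC; the
# subclass name, subcommand name, and method names below are illustrative
# assumptions (the abstract method names are not shown in this file).
#
#   class HelloCommand(BaseCommand):
#       @staticmethod
#       def register_subcommand(subparsers):
#           hello_parser = subparsers.add_parser("hello")
#           hello_parser.set_defaults(func=lambda args: HelloCommand().run())
#
#       def run(self):
#           print("hello from the CLI")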
| 19 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = StableDiffusionPanoramaPipeline
lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowerCamelCase_ = DDIMScheduler()
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCamelCase_ = CLIPTextModel(lowercase )
lowerCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=0 ) -> List[str]:
lowerCamelCase_ = torch.manual_seed(lowercase )
lowerCamelCase_ = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionPanoramaPipeline(**lowercase )
lowerCamelCase_ = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = sd_pipe(**lowercase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> int:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionPanoramaPipeline(**lowercase )
lowerCamelCase_ = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = "french fries"
lowerCamelCase_ = sd_pipe(**lowercase , negative_prompt=lowercase )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionPanoramaPipeline(**lowercase )
lowerCamelCase_ = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = sd_pipe(**lowercase , view_batch_size=2 )
lowerCamelCase_ = output.images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" )
lowerCamelCase_ = StableDiffusionPanoramaPipeline(**lowercase )
lowerCamelCase_ = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = sd_pipe(**lowercase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = PNDMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , skip_prk_steps=lowercase )
lowerCamelCase_ = StableDiffusionPanoramaPipeline(**lowercase )
lowerCamelCase_ = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = sd_pipe(**lowercase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_( self , lowercase=0 ) -> Any:
lowerCamelCase_ = torch.manual_seed(lowercase )
lowerCamelCase_ = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = "stabilityai/stable-diffusion-2-base"
lowerCamelCase_ = DDIMScheduler.from_pretrained(lowercase , subfolder="scheduler" )
lowerCamelCase_ = StableDiffusionPanoramaPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**lowercase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowerCamelCase_ = np.array(
[
0.3_6_9_6_8_3_9_2,
0.2_7_0_2_5_3_7_2,
0.3_2_4_4_6_7_6_6,
0.2_8_3_7_9_3_8_7,
0.3_6_3_6_3_2_7_4,
0.3_0_7_3_3_3_4_7,
0.2_7_1_0_0_0_2_7,
0.2_7_0_5_4_1_2_5,
0.2_5_5_3_6_0_9_6,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=lowercase )
lowerCamelCase_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**lowercase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowerCamelCase_ = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = 0
def callback_fn(lowercase , lowercase , lowercase ) -> None:
lowerCamelCase_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowerCamelCase_ = latents[0, -3:, -3:, -1]
lowerCamelCase_ = np.array(
[
0.1_8_6_8_1_8_6_9,
0.3_3_9_0_7_8_1_6,
0.5_3_6_1_2_7_6,
0.1_4_4_3_2_8_6_5,
-0.0_2_8_5_6_6_1_1,
-0.7_3_9_4_1_1_2_3,
0.2_3_3_9_7_9_8_7,
0.4_7_3_2_2_6_8_2,
-0.3_7_8_2_3_1_6_4,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCamelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowerCamelCase_ = latents[0, -3:, -3:, -1]
lowerCamelCase_ = np.array(
[
0.1_8_5_3_9_6_4_5,
0.3_3_9_8_7_2_4_8,
0.5_3_7_8_5_5_9,
0.1_4_4_3_7_1_4_2,
-0.0_2_4_5_5_2_6_1,
-0.7_3_3_8_3_1_7,
0.2_3_9_9_0_7_5_5,
0.4_7_3_5_6_2_7_2,
-0.3_7_8_6_5_0_5,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCamelCase_ = False
lowerCamelCase_ = "stabilityai/stable-diffusion-2-base"
lowerCamelCase_ = DDIMScheduler.from_pretrained(lowercase , subfolder="scheduler" )
lowerCamelCase_ = StableDiffusionPanoramaPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
lowerCamelCase_ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
lowerCamelCase_ = self.get_inputs()
pipe(**lowercase , callback=lowercase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ = "stabilityai/stable-diffusion-2-base"
lowerCamelCase_ = DDIMScheduler.from_pretrained(lowercase , subfolder="scheduler" )
lowerCamelCase_ = StableDiffusionPanoramaPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
lowerCamelCase_ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ = self.get_inputs()
lowerCamelCase_ = pipe(**lowercase )
lowerCamelCase_ = torch.cuda.max_memory_allocated()
    # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
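# Usage sketch for the pipeline exercised above, kept as a comment because it
# downloads weights; the checkpoint id matches the slow tests, and the width is an
# illustrative choice.
#
#   from diffusers import StableDiffusionPanoramaPipeline, DDIMScheduler
#   model_id = "stabilityai/stable-diffusion-2-base"
#   scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
#   pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_id, scheduler=scheduler).to("cuda")
#   image = pipe("a photo of the dolomites", width=2048).images[0]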
| 19 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[Any]:
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = 1
lowerCamelCase_ = FrozenDict(lowercase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowerCamelCase_ = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowercase , standard_warn=lowercase )
lowerCamelCase_ = dict(scheduler.config )
lowerCamelCase_ = True
lowerCamelCase_ = FrozenDict(lowercase )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=lowercase , segmentation_processor=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
self.enable_attention_slicing(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , lowercase , lowercase , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , **lowercase , ) -> int:
lowerCamelCase_ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowerCamelCase_ = self.segmentation_model(**lowercase )
lowerCamelCase_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowerCamelCase_ = self.numpy_to_pil(lowercase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowerCamelCase_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowercase , image=lowercase , mask_image=lowercase , height=lowercase , width=lowercase , num_inference_steps=lowercase , guidance_scale=lowercase , negative_prompt=lowercase , num_images_per_prompt=lowercase , eta=lowercase , generator=lowercase , latents=lowercase , output_type=lowercase , return_dict=lowercase , callback=lowercase , callback_steps=lowercase , )
| 19 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = DanceDiffusionPipeline
lowerCAmelCase__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
lowerCAmelCase__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowercase , use_timestep_embedding=lowercase , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
lowerCamelCase_ = IPNDMScheduler()
lowerCamelCase_ = {
"unet": unet,
"scheduler": scheduler,
}
return components
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=0 ) -> Tuple:
if str(lowercase ).startswith("mps" ):
lowerCamelCase_ = torch.manual_seed(lowercase )
else:
lowerCamelCase_ = torch.Generator(device=lowercase ).manual_seed(lowercase )
lowerCamelCase_ = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 4,
}
return inputs
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = DanceDiffusionPipeline(**lowercase )
lowerCamelCase_ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = self.get_dummy_inputs(lowercase )
lowerCamelCase_ = pipe(**lowercase )
lowerCamelCase_ = output.audios
lowerCamelCase_ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCamelCase_ = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return super().test_save_load_local()
@skip_mps
def SCREAMING_SNAKE_CASE_( self ) -> int:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
return super().test_save_load_optional_components()
@skip_mps
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
return super().test_attention_slicing_forward_pass()
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = torch_device
lowerCamelCase_ = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
lowerCamelCase_ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = pipe(generator=lowercase , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCamelCase_ = output.audios
lowerCamelCase_ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCamelCase_ = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = torch_device
lowerCamelCase_ = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa )
lowerCamelCase_ = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = pipe(generator=lowercase , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCamelCase_ = output.audios
lowerCamelCase_ = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCamelCase_ = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 19 |
from collections import deque
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = len(lowerCamelCase__ )
lowerCamelCase_ = deque()
lowerCamelCase_ = [False for _ in range(lowerCamelCase__ )]
lowerCamelCase_ = [-1 for _ in range(lowerCamelCase__ )]
lowerCamelCase_ = index_of[:]
def strong_connect(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = index # the number when this node is seen
lowerCamelCase_ = index # lowest rank node reachable from here
index += 1
stack.append(lowerCamelCase__ )
lowerCamelCase_ = True
for w in g[v]:
if index_of[w] == -1:
lowerCamelCase_ = strong_connect(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
lowerCamelCase_ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
lowerCamelCase_ = []
lowerCamelCase_ = stack.pop()
lowerCamelCase_ = False
component.append(lowerCamelCase__ )
while w != v:
lowerCamelCase_ = stack.pop()
lowerCamelCase_ = False
component.append(lowerCamelCase__ )
components.append(lowerCamelCase__ )
return index
lowerCamelCase_ = []
for v in range(lowerCamelCase__ ):
if index_of[v] == -1:
strong_connect(lowerCamelCase__ , 0 , lowerCamelCase__ )
return components
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = [[] for _ in range(lowerCamelCase__ )]
for u, v in edges:
g[u].append(lowerCamelCase__ )
return g
if __name__ == "__main__":
# Test
__A =7
__A =[0, 0, 1, 2, 3, 3, 4, 4, 6]
__A =[1, 3, 2, 0, 1, 4, 5, 6, 5]
__A =[(u, v) for u, v in zip(source, target)]
__A =create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
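# A runnable cross-check sketch that recomputes the same components with a different
# algorithm (Kosaraju's two-pass DFS); the names here are illustrative and not taken
# from the original file.
from collections import defaultdict


def kosaraju_scc(n, edge_list):
    graph, rev = defaultdict(list), defaultdict(list)
    for u, v in edge_list:
        graph[u].append(v)
        rev[v].append(u)

    seen = [False] * n
    finish_order = []

    def dfs(u, adj, out):
        seen[u] = True
        for w in adj[u]:
            if not seen[w]:
                dfs(w, adj, out)
        out.append(u)

    # First pass: record DFS finish order on the original graph.
    for u in range(n):
        if not seen[u]:
            dfs(u, graph, finish_order)

    # Second pass: DFS on the reversed graph in reverse finish order.
    seen = [False] * n
    components = []
    for u in reversed(finish_order):
        if not seen[u]:
            component = []
            dfs(u, rev, component)
            components.append(component)
    return components


# Same test graph as above: the strongly connected components are
# {0, 1, 2, 3}, {4}, {5} and {6} (listing order may differ from Tarjan's).
_edge_list = [(0, 1), (0, 3), (1, 2), (2, 0), (3, 1), (3, 4), (4, 5), (4, 6), (6, 5)]
assert sorted(map(sorted, kosaraju_scc(7, _edge_list))) == [[0, 1, 2, 3], [4], [5], [6]]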
| 19 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__A =[
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
__A =[
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
__A =(
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
__A =(
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
__A =[
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
for tf_name, hf_name in patterns:
lowerCamelCase_ = k.replace(lowerCamelCase__ , lowerCamelCase__ )
return k
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = BigBirdPegasusConfig(**lowerCamelCase__ )
lowerCamelCase_ = BigBirdPegasusForConditionalGeneration(lowerCamelCase__ )
lowerCamelCase_ = torch_model.state_dict()
lowerCamelCase_ = {}
# separating decoder weights
lowerCamelCase_ = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
lowerCamelCase_ = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items() , "tf -> hf conversion" ):
lowerCamelCase_ = [k.endswith(lowerCamelCase__ ) for ending in KEYS_TO_IGNORE]
if any(lowerCamelCase__ ):
continue
lowerCamelCase_ = DECODER_PATTERNS
lowerCamelCase_ = rename_state_dict_key(lowerCamelCase__ , lowerCamelCase__ )
if new_k not in state_dict:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
lowerCamelCase_ = v.T
lowerCamelCase_ = torch.from_numpy(lowerCamelCase__ )
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , "tf -> hf conversion" ):
lowerCamelCase_ = [k.endswith(lowerCamelCase__ ) for ending in KEYS_TO_IGNORE]
if any(lowerCamelCase__ ):
continue
lowerCamelCase_ = REMAINING_PATTERNS
lowerCamelCase_ = rename_state_dict_key(lowerCamelCase__ , lowerCamelCase__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
lowerCamelCase_ = v.T
lowerCamelCase_ = torch.from_numpy(lowerCamelCase__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
lowerCamelCase_ = mapping["model.embed_positions.weight"]
lowerCamelCase_ = mapping.pop("model.embed_positions.weight" )
lowerCamelCase_ , lowerCamelCase_ = torch_model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
lowerCamelCase_ = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = tf.train.list_variables(lowerCamelCase__ )
lowerCamelCase_ = {}
lowerCamelCase_ = ["global_step"]
for name, shape in tqdm(lowerCamelCase__ , desc="converting tf checkpoint to dict" ):
lowerCamelCase_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
lowerCamelCase_ = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = array
return tf_weights
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = get_tf_weights_as_numpy(lowerCamelCase__ )
lowerCamelCase_ = convert_bigbird_pegasus(lowerCamelCase__ , lowerCamelCase__ )
torch_model.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
__A =parser.parse_args()
__A ={}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
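# A hypothetical invocation of this conversion script; the script file name and the
# paths are illustrative assumptions, while the flags match the parser defined above.
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird_pegasus_ckpt/model.ckpt-0 \
#       --save_dir ./bigbird-pegasus-converted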
| 19 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A ={'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure)
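# Usage sketch: the lazy module above only resolves these symbols on first access,
# so importing the package stays cheap until a class is actually used
# (the checkpoint name is an assumption):
#
#   from transformers import XGLMTokenizer, XGLMForCausalLM
#   model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M")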
| 19 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__A =pytest.mark.integration
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(lowercase ) for x in np.arange(30 ).tolist()]} )
return dset
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
lowerCamelCase_ = dset.map(
lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowercase , keep_in_memory=lowercase )
lowerCamelCase_ = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(lowercase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
from elasticsearch import Elasticsearch
lowerCamelCase_ = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
lowerCamelCase_ = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=lowercase )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertRaises(lowercase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase_ = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
self.assertRaises(lowercase , index.search_batch , queries[0] )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
import faiss
lowerCamelCase_ = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase_ = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowercase ):
lowerCamelCase_ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = faiss.IndexFlat(5 )
lowerCamelCase_ = FaissIndex(custom_index=lowercase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase_ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase_ ( lowerCamelCase__ ):
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase_ = "index.faiss"
lowerCamelCase_ = F'mock://{index_name}'
index.save(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = FaissIndex.load(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowerCamelCase__ )
assert scores[0] > 0
assert indices[0] == 1
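# Note: `mockfs` is presumably a pytest fixture (defined elsewhere in the test suite) that
# registers an fsspec "mock://" filesystem; its `storage_options` are simply forwarded to
# FaissIndex.save/load so the index round-trips through that mock filesystem.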
@require_elasticsearch
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = Elasticsearch()
lowerCamelCase_ = {"acknowledged": True}
lowerCamelCase_ = ElasticSearchIndex(es_client=lowercase )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["foo", "bar", "foobar"] )
# single query
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
# batched queries with timeout
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase , request_timeout=30 )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
| 19 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A ={'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
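# Note: this mirrors the usual transformers lazy-import layout -- under TYPE_CHECKING the
# concrete classes are imported for static analysis, while at runtime the module object is
# replaced by a _LazyModule that resolves the names in _import_structure on first access.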
| 19 | 1 |
import math
__A =1_0
__A =7
__A =BALLS_PER_COLOUR * NUM_COLOURS
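# Sketch of the combinatorics computed below (expected number of distinct colours drawn):
# for one colour, the chance that none of its BALLS_PER_COLOUR balls is among the `picked`
# draws is comb(NUM_BALLS - BALLS_PER_COLOUR, picked) / comb(NUM_BALLS, picked); by
# linearity of expectation the answer is NUM_COLOURS * (1 - that probability).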
def lowerCamelCase_ ( lowerCamelCase__ = 2_0 ):
    lowerCamelCase_ = math.comb(NUM_BALLS , lowerCamelCase__ )
lowerCamelCase_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCamelCase__ )
lowerCamelCase_ = NUM_COLOURS * (1 - missing_colour / total)
return F'{result:.9f}'
if __name__ == "__main__":
print(solution(2_0))
| 19 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__A ='''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__A =concatenate_datasets
__A =DownloadConfig
__A =DownloadManager
__A =DownloadMode
__A =DownloadConfig
__A =DownloadMode
__A =DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 19 | 1 |
def lowerCamelCase_ ( lowerCamelCase__ ):
return number & 1 == 0
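# Note: `n & 1` isolates the lowest bit, so the expression above is the usual bitwise
# parity check -- for example 4 & 1 == 0 (even) while 7 & 1 == 1 (odd).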
if __name__ == "__main__":
import doctest
doctest.testmod()
| 19 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A ={
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__A =False
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = "A painting of a squirrel eating a burger "
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = pipe(
prompt=lowercase , generator=lowercase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase )
lowerCamelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = generator.manual_seed(0 )
lowerCamelCase_ = pipe(
prompt=lowercase , generator=lowercase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
lowerCamelCase_ = "A painting of a squirrel eating a burger "
lowerCamelCase_ = torch.manual_seed(0 )
lowerCamelCase_ = pipe(
prompt=lowercase , generator=lowercase , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
lowerCamelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 19 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , ) -> Optional[int]:
lowerCamelCase_ = parent
lowerCamelCase_ = 13
lowerCamelCase_ = 7
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = 99
lowerCamelCase_ = 32
lowerCamelCase_ = 2
lowerCamelCase_ = 4
lowerCamelCase_ = 37
lowerCamelCase_ = "gelu"
lowerCamelCase_ = 0.1
lowerCamelCase_ = 0.1
lowerCamelCase_ = 512
lowerCamelCase_ = 16
lowerCamelCase_ = 2
lowerCamelCase_ = 0.0_2
lowerCamelCase_ = 3
lowerCamelCase_ = 4
lowerCamelCase_ = None
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) = self.prepare_config_and_inputs()
lowerCamelCase_ = True
lowerCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Any:
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Tuple:
lowerCamelCase_ = True
lowerCamelCase_ = TFEsmModel(config=lowercase )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"encoder_hidden_states": encoder_hidden_states,
"encoder_attention_mask": encoder_attention_mask,
}
lowerCamelCase_ = model(lowercase )
lowerCamelCase_ = [input_ids, input_mask]
lowerCamelCase_ = model(lowercase , encoder_hidden_states=lowercase )
# Also check the case where encoder outputs are not passed
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
lowerCamelCase_ = TFEsmForMaskedLM(config=lowercase )
lowerCamelCase_ = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = TFEsmForTokenClassification(config=lowercase )
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.prepare_config_and_inputs()
        ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) = config_and_inputs
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': TFEsmModel,
'fill-mask': TFEsmForMaskedLM,
'text-classification': TFEsmForSequenceClassification,
'token-classification': TFEsmForTokenClassification,
'zero-shot': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = TFEsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = TFEsmModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
pass
@unittest.skip("Protein models do not support embedding resizing." )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
pass
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(lowercase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowerCamelCase_ = model.get_bias()
assert isinstance(lowercase , lowercase )
for k, v in name.items():
assert isinstance(lowercase , tf.Variable )
else:
lowerCamelCase_ = model.get_output_embeddings()
assert x is None
lowerCamelCase_ = model.get_bias()
assert name is None
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(lowercase )[0]
lowerCamelCase_ = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , lowercase )
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
lowerCamelCase_ = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(lowercase )[0]
# compare the actual values for a slice.
lowerCamelCase_ = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 19 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowerCamelCase_ ( ):
lowerCamelCase_ = ArgumentParser(
description=(
"PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=lowerCamelCase__ , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=lowerCamelCase__ , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=lowerCamelCase__ )
return parser.parse_args()
def lowerCamelCase_ ( ):
lowerCamelCase_ = parse_args()
# Import training_script as a module.
lowerCamelCase_ = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowerCamelCase_ = script_fpath.stem
lowerCamelCase_ = importlib.import_module(lowerCamelCase__ )
# Patch sys.argv
lowerCamelCase_ = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
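# Note: a typical invocation (script name and arguments here are only illustrative) is
#   python xla_spawn.py --num_cores 8 my_training_script.py --arg1 value1
# the wrapped script's own flags are collected as trailing positionals and passed through
# untouched, with "--tpu_num_cores" appended before xmp.spawn launches the processes.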
| 19 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( ):
# Get the sagemaker specific mp parameters from smp_options variable.
lowerCamelCase_ = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
lowerCamelCase_ = json.loads(lowerCamelCase__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
lowerCamelCase_ = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
lowerCamelCase_ = json.loads(lowerCamelCase__ )
if not mpi_options.get("sagemaker_mpi_enabled" , lowerCamelCase__ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = field(
default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , lowercase , )
@cached_property
def SCREAMING_SNAKE_CASE_( self ) -> "torch.device":
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
lowerCamelCase_ = torch.device("cpu" )
lowerCamelCase_ = 0
elif is_sagemaker_model_parallel_available():
lowerCamelCase_ = smp.local_rank()
lowerCamelCase_ = torch.device("cuda" , lowercase )
lowerCamelCase_ = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
lowerCamelCase_ = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
lowerCamelCase_ = torch.device("cuda" , self.local_rank )
lowerCamelCase_ = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
lowerCamelCase_ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
lowerCamelCase_ = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
lowerCamelCase_ = torch.device("cuda" , self.local_rank )
lowerCamelCase_ = 1
if device.type == "cuda":
torch.cuda.set_device(lowercase )
return device
@property
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
return not is_sagemaker_model_parallel_available()
@property
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return False
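    # Note: the device setup above prefers SageMaker model parallelism, then SageMaker data
    # parallelism (smddp), then plain single-GPU / torch.distributed with nccl, which is why
    # the world-size and device-placement properties are overridden together with it.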
| 19 | 1 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = BertJapaneseTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
super().setUp()
lowerCamelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Union[str, Any]:
lowerCamelCase_ = "こんにちは、世界。 \nこんばんは、世界。"
lowerCamelCase_ = "こんにちは 、 世界 。 こんばんは 、 世界 。"
return input_text, output_text
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Any:
lowerCamelCase_ , lowerCamelCase_ = self.get_input_output_texts(lowercase )
lowerCamelCase_ = tokenizer.encode(lowercase , add_special_tokens=lowercase )
lowerCamelCase_ = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
return text, ids
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_( self ) -> int:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
self.assertListEqual(lowercase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
self.assertIsNotNone(lowercase )
lowerCamelCase_ = "こんにちは、世界。\nこんばんは、世界。"
lowerCamelCase_ = tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCamelCase_ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(lowercase , "wb" ) as handle:
pickle.dump(lowercase , lowercase )
with open(lowercase , "rb" ) as handle:
lowerCamelCase_ = pickle.load(lowercase )
lowerCamelCase_ = tokenizer_new.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
try:
lowerCamelCase_ = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
try:
lowerCamelCase_ = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = MecabTokenizer(do_lower_case=lowercase , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def SCREAMING_SNAKE_CASE_( self ) -> int:
try:
lowerCamelCase_ = MecabTokenizer(
do_lower_case=lowercase , normalize_text=lowercase , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = MecabTokenizer(normalize_text=lowercase , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(lowercase )
lowerCamelCase_ = "こんにちは、世界。\nこんばんは、世界。"
lowerCamelCase_ = tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCamelCase_ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(lowercase , "wb" ) as handle:
pickle.dump(lowercase , lowercase )
with open(lowercase , "rb" ) as handle:
lowerCamelCase_ = pickle.load(lowercase )
lowerCamelCase_ = tokenizer_new.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
@require_sudachi
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = SudachiTokenizer(do_lower_case=lowercase , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = SudachiTokenizer(normalize_text=lowercase , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = SudachiTokenizer(trim_whitespace=lowercase , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(lowercase )
lowerCamelCase_ = "こんにちは、世界。\nこんばんは、世界。"
lowerCamelCase_ = tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCamelCase_ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(lowercase , "wb" ) as handle:
pickle.dump(lowercase , lowercase )
with open(lowercase , "rb" ) as handle:
lowerCamelCase_ = pickle.load(lowercase )
lowerCamelCase_ = tokenizer_new.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
@require_jumanpp
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = JumanppTokenizer(do_lower_case=lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = JumanppTokenizer(normalize_text=lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = JumanppTokenizer(trim_whitespace=lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
lowerCamelCase_ = {}
for i, token in enumerate(lowercase ):
lowerCamelCase_ = i
lowerCamelCase_ = WordpieceTokenizer(vocab=lowercase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
lowerCamelCase_ = tokenizer.subword_tokenizer
lowerCamelCase_ = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
self.assertListEqual(lowercase , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
lowerCamelCase_ = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
self.assertListEqual(lowercase , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
lowerCamelCase_ = tokenizer.encode("ありがとう。" , add_special_tokens=lowercase )
lowerCamelCase_ = tokenizer.encode("どういたしまして。" , add_special_tokens=lowercase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(lowercase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = BertJapaneseTokenizer
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
super().setUp()
lowerCamelCase_ = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE_( self , **lowercase ) -> Any:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Union[str, Any]:
lowerCamelCase_ = "こんにちは、世界。 \nこんばんは、世界。"
lowerCamelCase_ = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
lowerCamelCase_ = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
self.assertListEqual(
lowercase , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
lowerCamelCase_ = {}
for i, token in enumerate(lowercase ):
lowerCamelCase_ = i
lowerCamelCase_ = CharacterTokenizer(vocab=lowercase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
lowerCamelCase_ = tokenizer.encode("ありがとう。" , add_special_tokens=lowercase )
lowerCamelCase_ = tokenizer.encode("どういたしまして。" , add_special_tokens=lowercase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(lowercase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = "cl-tohoku/bert-base-japanese"
lowerCamelCase_ = AutoTokenizer.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = "cl-tohoku/bert-base-japanese"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertTokenizer.from_pretrained(lowercase )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
lowerCamelCase_ = "bert-base-cased"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertJapaneseTokenizer.from_pretrained(lowercase )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
| 19 |
import math
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 0 , lowerCamelCase__ = 0 ):
lowerCamelCase_ = end or len(lowerCamelCase__ )
for i in range(lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = i
lowerCamelCase_ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
lowerCamelCase_ = array[temp_index - 1]
temp_index -= 1
lowerCamelCase_ = temp_index_value
return array
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): # Max Heap
lowerCamelCase_ = index
lowerCamelCase_ = 2 * index + 1 # Left Node
lowerCamelCase_ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
lowerCamelCase_ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
lowerCamelCase_ = right_index
if largest != index:
lowerCamelCase_ , lowerCamelCase_ = array[largest], array[index]
heapify(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = len(lowerCamelCase__ )
for i in range(n // 2 , -1 , -1 ):
heapify(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for i in range(n - 1 , 0 , -1 ):
lowerCamelCase_ , lowerCamelCase_ = array[0], array[i]
heapify(lowerCamelCase__ , 0 , lowerCamelCase__ )
return array
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = low
lowerCamelCase_ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
lowerCamelCase_ , lowerCamelCase_ = array[j], array[i]
i += 1
def lowerCamelCase_ ( lowerCamelCase__ ):
if len(lowerCamelCase__ ) == 0:
return array
lowerCamelCase_ = 2 * math.ceil(math.loga(len(lowerCamelCase__ ) ) )
lowerCamelCase_ = 1_6
return intro_sort(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ , lowerCamelCase__ )
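# Note: the wrapper above is the classic introsort recipe -- quicksort around a
# median-of-three pivot until max_depth (2 * ceil(log2(n))) is exhausted, heap_sort as the
# fallback, and insertion_sort to finish any range shorter than size_threshold (16).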
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(lowerCamelCase__ )
max_depth -= 1
lowerCamelCase_ = median_of_a(lowerCamelCase__ , lowerCamelCase__ , start + ((end - start) // 2) + 1 , end - 1 )
lowerCamelCase_ = partition(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
intro_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = p
return insertion_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A =input('''Enter numbers separated by a comma : ''').strip()
__A =[float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 19 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''sentencepiece.model'''}
__A ={
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
}
__A ={
'''google/rembert''': 2_5_6,
}
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowercase , lowercase=False , lowercase=True , lowercase=True , lowercase="[CLS]" , lowercase="[SEP]" , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , **lowercase , ) -> str:
super().__init__(
do_lower_case=lowercase , remove_space=lowercase , keep_accents=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , **lowercase , )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = remove_space
lowerCamelCase_ = keep_accents
lowerCamelCase_ = vocab_file
lowerCamelCase_ = spm.SentencePieceProcessor()
self.sp_model.Load(lowercase )
@property
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
return state
def __setstate__( self , lowercase ) -> Union[str, Any]:
lowerCamelCase_ = d
lowerCamelCase_ = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=False ) -> str:
lowerCamelCase_ = self.sp_model.EncodeAsPieces(lowercase )
return pieces
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Any:
return self.sp_model.PieceToId(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Dict:
return self.sp_model.IdToPiece(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Optional[int]:
lowerCamelCase_ = self.sp_model.decode_pieces(lowercase )
return out_string
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowercase )) + [1] + ([0] * len(lowercase )) + [1]
return [1] + ([0] * len(lowercase )) + [1]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not os.path.isdir(lowercase ):
logger.error("Vocabulary path ({}) should be a directory".format(lowercase ) )
return
lowerCamelCase_ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,)
| 19 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , *lowercase , lowercase=None , lowercase=None , **lowercase ) -> List[str]:
super().__init__(*lowercase , **lowercase )
lowerCamelCase_ = eval_examples
lowerCamelCase_ = post_process_function
def SCREAMING_SNAKE_CASE_( self , lowercase = None , lowercase=None , lowercase = None , lowercase = "eval" , **lowercase , ) -> Dict[str, float]:
lowerCamelCase_ = gen_kwargs.copy()
lowerCamelCase_ = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
lowerCamelCase_ = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
lowerCamelCase_ = gen_kwargs
lowerCamelCase_ = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCamelCase_ = self.get_eval_dataloader(lowercase )
lowerCamelCase_ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = time.time()
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase_ = eval_loop(
lowercase , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowerCamelCase_ = self.post_process_function(lowercase , lowercase , lowercase )
lowerCamelCase_ = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase_ = metrics.pop(lowercase )
metrics.update(output.metrics )
else:
lowerCamelCase_ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCamelCase_ = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase )
return metrics
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=None , lowercase = "test" , **lowercase ) -> Union[str, Any]:
lowerCamelCase_ = gen_kwargs.copy()
lowerCamelCase_ = self.get_test_dataloader(lowercase )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = time.time()
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase_ = eval_loop(
lowercase , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCamelCase_ = self.post_process_function(lowercase , lowercase , lowercase , "predict" )
lowerCamelCase_ = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase_ = metrics.pop(lowercase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase )
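    # Note: both overrides share one pattern -- compute_metrics is unset so the generic
    # evaluation loop only gathers generations, and the metrics are computed afterwards on
    # the post-processed (decoded) predictions returned by post_process_function.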
| 19 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__A ={
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 19 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__A =logging.get_logger(__name__) # pylint: disable=invalid-name
__A ='''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
super().__init__()
self.register_modules(
prior=lowercase , image_encoder=lowercase , image_processor=lowercase , scheduler=lowercase , renderer=lowercase , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if latents is None:
lowerCamelCase_ = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase_ = latents.to(lowercase )
lowerCamelCase_ = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE_( self , lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase_ = torch.device(f'cuda:{gpu_id}' )
lowerCamelCase_ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , ) -> List[str]:
if isinstance(lowercase , lowercase ) and isinstance(image[0] , torch.Tensor ):
lowerCamelCase_ = torch.cat(lowercase , axis=0 ) if image[0].ndim == 4 else torch.stack(lowercase , axis=0 )
if not isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = self.image_processor(lowercase , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
lowerCamelCase_ = image.to(dtype=self.image_encoder.dtype , device=lowercase )
lowerCamelCase_ = self.image_encoder(lowercase )["last_hidden_state"]
lowerCamelCase_ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
lowerCamelCase_ = image_embeds.repeat_interleave(lowercase , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_ = torch.zeros_like(lowercase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowercase )
def __call__( self , lowercase , lowercase = 1 , lowercase = 25 , lowercase = None , lowercase = None , lowercase = 4.0 , lowercase = 64 , lowercase = "pil" , lowercase = True , ) -> Union[str, Any]:
if isinstance(lowercase , PIL.Image.Image ):
lowerCamelCase_ = 1
elif isinstance(lowercase , torch.Tensor ):
lowerCamelCase_ = image.shape[0]
elif isinstance(lowercase , lowercase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
lowerCamelCase_ = len(lowercase )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowercase )}' )
lowerCamelCase_ = self._execution_device
lowerCamelCase_ = batch_size * num_images_per_prompt
lowerCamelCase_ = guidance_scale > 1.0
lowerCamelCase_ = self._encode_image(lowercase , lowercase , lowercase , lowercase )
# prior
self.scheduler.set_timesteps(lowercase , device=lowercase )
lowerCamelCase_ = self.scheduler.timesteps
lowerCamelCase_ = self.prior.config.num_embeddings
lowerCamelCase_ = self.prior.config.embedding_dim
lowerCamelCase_ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowercase , lowercase , lowercase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
lowerCamelCase_ = latents.reshape(latents.shape[0] , lowercase , lowercase )
for i, t in enumerate(self.progress_bar(lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = self.scheduler.scale_model_input(lowercase , lowercase )
lowerCamelCase_ = self.prior(
lowercase , timestep=lowercase , proj_embedding=lowercase , ).predicted_image_embedding
# remove the variance
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
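                # Classifier-free guidance: push the prediction away from the unconditional
                # branch by the guidance scale (uncond + scale * (cond - uncond)).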
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
lowerCamelCase_ = self.scheduler.step(
lowercase , timestep=lowercase , sample=lowercase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowercase )
lowerCamelCase_ = []
for i, latent in enumerate(lowercase ):
lowerCamelCase_ = self.renderer.decode(
latent[None, :] , lowercase , size=lowercase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(lowercase )
lowerCamelCase_ = torch.stack(lowercase )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
lowerCamelCase_ = images.cpu().numpy()
if output_type == "pil":
lowerCamelCase_ = [self.numpy_to_pil(lowercase ) for image in images]
# Offload last model to CPU
if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowercase )
| 19 | 1 |
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = 0
while b > 0:
if b & 1:
lowerCamelCase_ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
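

# A minimal, self-contained sketch of the same double-and-add (Russian peasant)
# multiplication idea with explicit names. `peasant_multiply` is a hypothetical
# helper added here purely for illustration; it is not part of the functions above
# and assumes a non-negative multiplier b.
def peasant_multiply(a: int, b: int) -> int:
    result = 0
    while b > 0:
        if b & 1:  # add the current power-of-two multiple of a when this bit of b is set
            result += a
        a += a  # double a
        b >>= 1  # shift to the next bit of b
    return result


assert peasant_multiply(12, 13) == 156  # quick sanity check of the sketch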
| 19 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCamelCase_ ( ):
lowerCamelCase_ , lowerCamelCase_ = 9, 1_4 # noqa: F841
lowerCamelCase_ = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
lowerCamelCase_ = defaultdict(lowerCamelCase__ )
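    # Build an undirected adjacency list: record each weighted edge in both directions
    # so Prim's algorithm can expand the tree from either endpoint.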
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
lowerCamelCase_ = mst(lowerCamelCase__ )
lowerCamelCase_ = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
lowerCamelCase_ = tuple(answer[:2] )
lowerCamelCase_ = tuple(edge[::-1] )
assert edge in result or reverse in result
| 19 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = 10
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = [1, 2, 3, 4]
lowerCamelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
lowerCamelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
lowerCamelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase , self.block_size , 0 ) , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
lowerCamelCase_ , lowerCamelCase_ = process_story(lowercase )
self.assertEqual(lowercase , [] )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = ""
lowerCamelCase_ , lowerCamelCase_ = process_story(lowercase )
self.assertEqual(lowercase , [] )
self.assertEqual(lowercase , [] )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
lowerCamelCase_ , lowerCamelCase_ = process_story(lowercase )
lowerCamelCase_ = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(lowercase , lowercase )
lowerCamelCase_ = ["It was the best of times."]
self.assertEqual(lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = torch.tensor([1, 2, 3, 4] )
lowerCamelCase_ = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowercase , 0 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
lowerCamelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase , 23 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
lowerCamelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase , 1 ).numpy() , expected.numpy() )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = 101
lowerCamelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
lowerCamelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
lowerCamelCase_ = compute_token_type_ids(lowercase , lowercase )
np.testing.assert_array_equal(lowercase , lowercase )
| 19 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__A =1_6
__A =3_2
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ = 1_6 ):
lowerCamelCase_ = AutoTokenizer.from_pretrained("bert-base-cased" )
lowerCamelCase_ = load_dataset("glue" , "mrpc" )
def tokenize_function(lowerCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ = datasets.map(
lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowerCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ = 1_6
elif accelerator.mixed_precision != "no":
lowerCamelCase_ = 8
else:
lowerCamelCase_ = None
return tokenizer.pad(
lowerCamelCase__ , padding="longest" , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors="pt" , )
# Instantiate dataloaders.
lowerCamelCase_ = DataLoader(
tokenized_datasets["train"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
lowerCamelCase_ = DataLoader(
tokenized_datasets["validation"] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__A =mocked_dataloaders # noqa: F811
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowerCamelCase__ ) == "1":
lowerCamelCase_ = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
lowerCamelCase_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
lowerCamelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ = config["lr"]
lowerCamelCase_ = int(config["num_epochs"] )
lowerCamelCase_ = int(config["seed"] )
lowerCamelCase_ = int(config["batch_size"] )
set_seed(lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
lowerCamelCase_ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase_ = batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase_ = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowerCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ = model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ = AdamW(params=model.parameters() , lr=lowerCamelCase__ )
# Instantiate scheduler
lowerCamelCase_ = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
lowerCamelCase_ = os.path.split(lowerCamelCase__ )[-1].split("." )[0]
accelerator.init_trackers(lowerCamelCase__ , lowerCamelCase__ )
# Now we train the model
for epoch in range(lowerCamelCase__ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
lowerCamelCase_ = 0
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
lowerCamelCase_ = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ = outputs.logits.argmax(dim=-1 )
lowerCamelCase_ , lowerCamelCase_ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowerCamelCase__ , references=lowerCamelCase__ , )
lowerCamelCase_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase__ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(lowerCamelCase__ ),
"epoch": epoch,
} , step=lowerCamelCase__ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase_ ( ):
lowerCamelCase_ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=lowerCamelCase__ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = {"lr": 2e-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
main()
| 19 | 1 |
import argparse
import os
import re
__A ='''src/diffusers'''
# Pattern that looks at the indentation in a line.
__A =re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
__A =re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__A =re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
__A =re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__A =re.compile(R'''\[([^\]]+)\]''')
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = _re_indent.search(lowerCamelCase__ )
return "" if search is None else search.groups()[0]
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__="" , lowerCamelCase__=None , lowerCamelCase__=None ):
lowerCamelCase_ = 0
lowerCamelCase_ = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase__ ):
index += 1
lowerCamelCase_ = ["\n".join(lines[:index] )]
else:
lowerCamelCase_ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCamelCase_ = [lines[index]]
index += 1
while index < len(lowerCamelCase__ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(lowerCamelCase__ ) )
if index < len(lowerCamelCase__ ) - 1:
lowerCamelCase_ = [lines[index + 1]]
index += 1
else:
lowerCamelCase_ = []
else:
blocks.append("\n".join(lowerCamelCase__ ) )
lowerCamelCase_ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase__ ) > 0:
blocks.append("\n".join(lowerCamelCase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase__ ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def lowerCamelCase_ ( lowerCamelCase__ ):
def _inner(lowerCamelCase__ ):
return key(lowerCamelCase__ ).lower().replace("_" , "" )
return _inner
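

# For example, the wrapped key treats "_LazyModule", "lazy_module" and "LazyModule"
# as the same sort key ("lazymodule"), so leading or inner underscores do not affect ordering.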
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=None ):
# If no key is provided, we use a noop.
def noop(lowerCamelCase__ ):
return x
if key is None:
lowerCamelCase_ = noop
# Constants are all uppercase, they go first.
lowerCamelCase_ = [obj for obj in objects if key(lowerCamelCase__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCamelCase_ = [obj for obj in objects if key(lowerCamelCase__ )[0].isupper() and not key(lowerCamelCase__ ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCamelCase_ = [obj for obj in objects if not key(lowerCamelCase__ )[0].isupper()]
lowerCamelCase_ = ignore_underscore(lowerCamelCase__ )
return sorted(lowerCamelCase__ , key=lowerCamelCase__ ) + sorted(lowerCamelCase__ , key=lowerCamelCase__ ) + sorted(lowerCamelCase__ , key=lowerCamelCase__ )
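

# Illustrative ordering produced above: uppercase constants first, then CamelCase classes,
# then lowercase functions, e.g. ["FOO_LIST", "BarModel", "load_bar"], with alphabetical order
# (ignoring underscores) within each group.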
def lowerCamelCase_ ( lowerCamelCase__ ):
# This inner function sort imports between [ ].
def _replace(lowerCamelCase__ ):
lowerCamelCase_ = match.groups()[0]
if "," not in imports:
return F'[{imports}]'
lowerCamelCase_ = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCamelCase_ = keys[:-1]
return "[" + ", ".join([F'"{k}"' for k in sort_objects(lowerCamelCase__ )] ) + "]"
lowerCamelCase_ = import_statement.split("\n" )
if len(lowerCamelCase__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCamelCase_ = 2 if lines[1].strip() == "[" else 1
lowerCamelCase_ = [(i, _re_strip_line.search(lowerCamelCase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCamelCase_ = sort_objects(lowerCamelCase__ , key=lambda lowerCamelCase__ : x[1] )
lowerCamelCase_ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCamelCase_ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCamelCase_ = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCamelCase_ = keys[:-1]
lowerCamelCase_ = get_indent(lines[1] ) + ", ".join([F'"{k}"' for k in sort_objects(lowerCamelCase__ )] )
return "\n".join(lowerCamelCase__ )
else:
# Finally we have to deal with imports fitting on one line
lowerCamelCase_ = _re_bracket_content.sub(_replace , lowerCamelCase__ )
return import_statement
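

# Illustrative effect of the function above (a sketch, not output of a real run):
# '_import_structure["models.bert"].extend(["BertModel", "BERT_LIST", "load_bert"])'
# becomes '_import_structure["models.bert"].extend(["BERT_LIST", "BertModel", "load_bert"])',
# i.e. the names inside the brackets are re-sorted with constants first.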
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=True ):
with open(lowerCamelCase__ , "r" ) as f:
lowerCamelCase_ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCamelCase_ = split_code_in_indented_blocks(
lowerCamelCase__ , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCamelCase_ = main_blocks[block_idx]
lowerCamelCase_ = block.split("\n" )
# Get to the start of the imports.
lowerCamelCase_ = 0
while line_idx < len(lowerCamelCase__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCamelCase_ = len(lowerCamelCase__ )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase__ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCamelCase_ = "\n".join(block_lines[line_idx:-1] )
lowerCamelCase_ = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
lowerCamelCase_ = split_code_in_indented_blocks(lowerCamelCase__ , indent_level=lowerCamelCase__ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCamelCase_ = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCamelCase_ = [(pattern.search(lowerCamelCase__ ).groups()[0] if pattern.search(lowerCamelCase__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCamelCase_ = [(i, key) for i, key in enumerate(lowerCamelCase__ ) if key is not None]
lowerCamelCase_ = [x[0] for x in sorted(lowerCamelCase__ , key=lambda lowerCamelCase__ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCamelCase_ = 0
lowerCamelCase_ = []
for i in range(len(lowerCamelCase__ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowerCamelCase_ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(lowerCamelCase__ )
count += 1
# And we put our main block back together with its first and last line.
lowerCamelCase_ = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase__ ):
if check_only:
return True
else:
print(F'Overwriting {file}.' )
with open(lowerCamelCase__ , "w" ) as f:
f.write("\n".join(lowerCamelCase__ ) )
def lowerCamelCase_ ( lowerCamelCase__=True ):
lowerCamelCase_ = []
for root, _, files in os.walk(lowerCamelCase__ ):
if "__init__.py" in files:
lowerCamelCase_ = sort_imports(os.path.join(lowerCamelCase__ , "__init__.py" ) , check_only=lowerCamelCase__ )
if result:
lowerCamelCase_ = [os.path.join(lowerCamelCase__ , "__init__.py" )]
if len(lowerCamelCase__ ) > 0:
raise ValueError(F'Would overwrite {len(lowerCamelCase__ )} files, run `make style`.' )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
__A =parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 19 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__A =None
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__A ={
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
__A ={
'''facebook/mbart-large-en-ro''': 1_0_2_4,
'''facebook/mbart-large-cc25''': 1_0_2_4,
}
# fmt: off
__A =['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = MBartTokenizer
lowerCAmelCase__ = []
lowerCAmelCase__ = []
def __init__( self , lowercase=None , lowercase=None , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=None , lowercase=None , lowercase=None , **lowercase , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
super().__init__(
vocab_file=lowercase , tokenizer_file=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , src_lang=lowercase , tgt_lang=lowercase , additional_special_tokens=lowercase , **lowercase , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = False if not self.vocab_file else True
lowerCamelCase_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
lowerCamelCase_ = {
lang_code: self.convert_tokens_to_ids(lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase_ = src_lang if src_lang is not None else "en_XX"
lowerCamelCase_ = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE_( self ) -> str:
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
lowerCamelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , **lowercase ) -> List[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase_ = src_lang
lowerCamelCase_ = self(lowercase , add_special_tokens=lowercase , return_tensors=lowercase , **lowercase )
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = "en_XX" , lowercase = None , lowercase = "ro_RO" , **lowercase , ) -> BatchEncoding:
lowerCamelCase_ = src_lang
lowerCamelCase_ = tgt_lang
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
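        # Reset the special tokens to the source-language setting: no prefix and
        # suffix = [eos, src_lang_code], following the mBART convention.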
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> None:
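        # Reset the special tokens to the target-language setting: no prefix and
        # suffix = [eos, tgt_lang_code], mirroring the source-language setup above.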
lowerCamelCase_ = self.convert_tokens_to_ids(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
return
lowerCamelCase_ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,)
| 19 | 1 |
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase , lowercase ) -> Union[str, Any]:
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = graph
self._normalize_graph(lowercase , lowercase )
lowerCamelCase_ = len(lowercase )
lowerCamelCase_ = None
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> List[str]:
        if isinstance(sources, int):
            lowerCamelCase_ = [sources]
        if isinstance(sinks, int):
            lowerCamelCase_ = [sinks]
if len(lowercase ) == 0 or len(lowercase ) == 0:
return
lowerCamelCase_ = sources[0]
lowerCamelCase_ = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(lowercase ) > 1 or len(lowercase ) > 1:
lowerCamelCase_ = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
lowerCamelCase_ = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
lowerCamelCase_ = max_input_flow
lowerCamelCase_ = 0
lowerCamelCase_ = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
lowerCamelCase_ = max_input_flow
lowerCamelCase_ = size - 1
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Optional[Any]:
lowerCamelCase_ = algorithm(self )
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase ) -> List[Any]:
lowerCamelCase_ = flow_network
lowerCamelCase_ = flow_network.verticesCount
lowerCamelCase_ = flow_network.sourceIndex
lowerCamelCase_ = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
lowerCamelCase_ = flow_network.graph
lowerCamelCase_ = False
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
if not self.executed:
self._algorithm()
lowerCamelCase_ = True
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
pass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase ) -> Any:
super().__init__(lowercase )
# use this to save your result
lowerCamelCase_ = -1
def SCREAMING_SNAKE_CASE_( self ) -> Any:
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , lowercase ) -> str:
super().__init__(lowercase )
lowerCamelCase_ = [[0] * self.verticies_count for i in range(self.verticies_count )]
lowerCamelCase_ = [0] * self.verticies_count
lowerCamelCase_ = [0] * self.verticies_count
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
lowerCamelCase_ = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
lowerCamelCase_ = 0
while i < len(lowercase ):
lowerCamelCase_ = vertices_list[i]
lowerCamelCase_ = self.heights[vertex_index]
self.process_vertex(lowercase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(lowercase ) )
lowerCamelCase_ = 0
else:
i += 1
lowerCamelCase_ = sum(self.preflow[self.source_index] )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[Any]:
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(lowercase , lowercase )
self.relabel(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase ) -> Any:
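        # Push: move as much excess flow as the residual capacity of the edge
        # (from_index -> to_index) allows, updating preflow and excesses on both ends.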
lowerCamelCase_ = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Dict:
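        # Relabel: lift the vertex to one above its lowest neighbour that still has
        # residual capacity, so that a subsequent push becomes admissible.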
lowerCamelCase_ = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
lowerCamelCase_ = self.heights[to_index]
if min_height is not None:
lowerCamelCase_ = min_height + 1
if __name__ == "__main__":
__A =[0]
__A =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__A =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__A =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__A =flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 19 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__A =pytest.mark.integration
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
        lowerCamelCase_ = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
lowerCamelCase_ = dset.map(
lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowercase , keep_in_memory=lowercase )
lowerCamelCase_ = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(lowercase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
from elasticsearch import Elasticsearch
lowerCamelCase_ = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
lowerCamelCase_ = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=lowercase )
lowerCamelCase_ , lowerCamelCase_ = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertRaises(lowercase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase_ = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
self.assertRaises(lowercase , index.search_batch , queries[0] )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
import faiss
lowerCamelCase_ = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase_ = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowercase ):
lowerCamelCase_ = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
import faiss
lowerCamelCase_ = faiss.IndexFlat(5 )
lowerCamelCase_ = FaissIndex(custom_index=lowercase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase_ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase_ ( lowerCamelCase__ ):
import faiss
lowerCamelCase_ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase_ = "index.faiss"
lowerCamelCase_ = F'mock://{index_name}'
index.save(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = FaissIndex.load(lowerCamelCase__ , storage_options=mockfs.storage_options )
lowerCamelCase_ = np.zeros(5 , dtype=np.floataa )
lowerCamelCase_ = 1
lowerCamelCase_ , lowerCamelCase_ = index.search(lowerCamelCase__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
lowerCamelCase_ = Elasticsearch()
lowerCamelCase_ = {"acknowledged": True}
lowerCamelCase_ = ElasticSearchIndex(es_client=lowercase )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["foo", "bar", "foobar"] )
# single query
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase_ = "foo"
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search(lowercase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
# batched queries with timeout
lowerCamelCase_ = ["foo", "bar", "foobar"]
lowerCamelCase_ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
lowerCamelCase_ , lowerCamelCase_ = index.search_batch(lowercase , request_timeout=30 )
lowerCamelCase_ = [scores[0] for scores in total_scores]
lowerCamelCase_ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase ) , 0 )
self.assertListEqual([1, 1, 1] , lowercase )
| 19 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , lowercase=True , lowercase=1 / 255 , lowercase=True , ) -> Optional[int]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCamelCase_ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = min_resolution
lowerCamelCase_ = max_resolution
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean
lowerCamelCase_ = image_std
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_pad
def SCREAMING_SNAKE_CASE_( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=False ) -> Optional[Any]:
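        # Computes the height/width the image processor is expected to produce:
        # shortest-edge resizing that preserves aspect ratio; for batched inputs the
        # per-image sizes are taken up to the maximum height and width in the batch.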
if not batched:
lowerCamelCase_ = image_inputs[0]
if isinstance(lowercase , Image.Image ):
lowerCamelCase_ , lowerCamelCase_ = image.size
else:
lowerCamelCase_ , lowerCamelCase_ = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase_ = int(self.size["shortest_edge"] * h / w )
lowerCamelCase_ = self.size["shortest_edge"]
elif w > h:
lowerCamelCase_ = self.size["shortest_edge"]
lowerCamelCase_ = int(self.size["shortest_edge"] * w / h )
else:
lowerCamelCase_ = self.size["shortest_edge"]
lowerCamelCase_ = self.size["shortest_edge"]
else:
lowerCamelCase_ = []
for image in image_inputs:
lowerCamelCase_ , lowerCamelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase_ = max(lowercase , key=lambda lowercase : item[0] )[0]
lowerCamelCase_ = max(lowercase , key=lambda lowercase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = YolosImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = YolosImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , "image_mean" ) )
self.assertTrue(hasattr(lowercase , "image_std" ) )
self.assertTrue(hasattr(lowercase , "do_normalize" ) )
self.assertTrue(hasattr(lowercase , "do_resize" ) )
self.assertTrue(hasattr(lowercase , "size" ) )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , lowercase )
lowerCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
lowerCamelCase_ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(lowercase , return_tensors="pt" ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
# Initialize image_processing
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(lowercase , return_tensors="pt" ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
# Initialize image_processings
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
lowerCamelCase_ = self.image_processing_class(do_resize=lowercase , do_normalize=lowercase , do_rescale=lowercase )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowerCamelCase_ = image_processing_a.pad(lowercase , return_tensors="pt" )
lowerCamelCase_ = image_processing_a(lowercase , return_tensors="pt" )
self.assertTrue(
torch.allclose(encoded_images_with_method["pixel_values"] , encoded_images["pixel_values"] , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> int:
# prepare image and target
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {"image_id": 39769, "annotations": target}
# encode them
lowerCamelCase_ = YolosImageProcessor.from_pretrained("hustvl/yolos-small" )
lowerCamelCase_ = image_processing(images=lowercase , annotations=lowercase , return_tensors="pt" )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowercase )
lowerCamelCase_ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowercase , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowercase ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowercase )
lowerCamelCase_ = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowercase , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowercase ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowercase ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowercase ) )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowercase ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowercase ) )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
# prepare image, target and masks_path
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
lowerCamelCase_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowerCamelCase_ = YolosImageProcessor(format="coco_panoptic" )
lowerCamelCase_ = image_processing(images=lowercase , annotations=lowercase , masks_path=lowercase , return_tensors="pt" )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowercase )
lowerCamelCase_ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowercase , atol=1e-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowercase ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowercase )
lowerCamelCase_ = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowercase , atol=1e-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowercase ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowercase ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowercase ) )
# verify masks
lowerCamelCase_ = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowercase )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowercase ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowercase ) )
| 19 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> List[str]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
lowerCamelCase_ = self.vocab_size - 1
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , head_mask=lowercase )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = OpenAIGPTLMHeadModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = OpenAIGPTDoubleHeadsModel(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> int:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = OpenAIGPTForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = model(lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = config_and_inputs
lowerCamelCase_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCAmelCase__ = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase ) -> int:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=False ) -> Any:
lowerCamelCase_ = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase , )
lowerCamelCase_ = inputs_dict["labels"]
lowerCamelCase_ = inputs_dict["labels"]
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase , )
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = OpenAIGPTModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , n_embd=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Any:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = OpenAIGPTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(lowercase )
lowerCamelCase_ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase ) # the president is
lowerCamelCase_ = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase_ = model.generate(lowercase , do_sample=lowercase )
self.assertListEqual(output_ids[0].tolist() , lowercase )
| 19 | 1 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __lt__( self , lowercase ) -> List[Any]:
return self[-1] < other[-1]
def __eq__( self , lowercase ) -> int:
return self[-1] == other[-1]
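# Patience sort, sketched under the assumption that the masked assignment targets
# below stand for `stacks`, the bisect index `i` and the sorted collection: each
# element is dealt onto the leftmost stack whose top is >= the element (found with
# bisect_left), and the stacks are then combined lazily with a heap-based merge,
# giving O(n log n) comparisons overall.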
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = []
# sort into stacks
for element in collection:
lowerCamelCase_ = Stack([element] )
lowerCamelCase_ = bisect_left(lowerCamelCase__ , lowerCamelCase__ )
if i != len(lowerCamelCase__ ):
stacks[i].append(lowerCamelCase__ )
else:
stacks.append(lowerCamelCase__ )
# use a heap-based merge to combine the stacks efficiently
lowerCamelCase_ = merge(*(reversed(lowerCamelCase__ ) for stack in stacks) )
return collection
if __name__ == "__main__":
__A =input('''Enter numbers separated by a comma:\n''').strip()
__A =[int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
| 19 |
__A ={str(digit): digit**5 for digit in range(1_0)}
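# Project Euler problem 30: the dict above (presumably the DIGITS_FIFTH_POWER table
# referenced below) precomputes digit**5 for every digit character, so summing the
# fifth powers of a number's digits is a table lookup per digit. Searching up to
# 1,000,000 is enough because 7 * 9**5 = 413,343 has only six digits, so no
# 7-digit number can equal the sum of the fifth powers of its digits.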
def lowerCamelCase_ ( lowerCamelCase__ ):
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(lowerCamelCase__ ) )
def lowerCamelCase_ ( ):
return sum(
number
for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
if number == digits_fifth_powers_sum(lowerCamelCase__ ) )
if __name__ == "__main__":
print(solution())
| 19 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''spiece.model'''}
__A ={
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
__A ={
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
__A =0
__A =1
__A =2
__A =3
__A =4
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = 'left'
def __init__( self , lowercase , lowercase=False , lowercase=True , lowercase=False , lowercase="<s>" , lowercase="</s>" , lowercase="<unk>" , lowercase="<sep>" , lowercase="<pad>" , lowercase="<cls>" , lowercase="<mask>" , lowercase=["<eop>", "<eod>"] , lowercase = None , **lowercase , ) -> None:
# The mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase , remove_space=lowercase , keep_accents=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , additional_special_tokens=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
lowerCamelCase_ = 3
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = remove_space
lowerCamelCase_ = keep_accents
lowerCamelCase_ = vocab_file
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
@property
def SCREAMING_SNAKE_CASE_( self ) -> int:
return len(self.sp_model )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> str:
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
return state
def __setstate__( self , lowercase ) -> Optional[Any]:
lowerCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase_ = {}
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Any:
if self.remove_space:
lowerCamelCase_ = " ".join(inputs.strip().split() )
else:
lowerCamelCase_ = inputs
lowerCamelCase_ = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCamelCase_ = unicodedata.normalize("NFKD" , lowercase )
lowerCamelCase_ = "".join([c for c in outputs if not unicodedata.combining(lowercase )] )
if self.do_lower_case:
lowerCamelCase_ = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]:
lowerCamelCase_ = self.preprocess_text(lowercase )
lowerCamelCase_ = self.sp_model.encode(lowercase , out_type=lowercase )
lowerCamelCase_ = []
for piece in pieces:
if len(lowercase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCamelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowercase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase_ = cur_pieces[1:]
else:
lowerCamelCase_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowercase )
else:
new_pieces.append(lowercase )
return new_pieces
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Union[str, Any]:
return self.sp_model.PieceToId(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[Any]:
return self.sp_model.IdToPiece(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Tuple:
lowerCamelCase_ = "".join(lowercase ).replace(lowercase , " " ).strip()
return out_string
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = False , lowercase = None , lowercase = True , **lowercase , ) -> str:
lowerCamelCase_ = kwargs.pop("use_source_tokenizer" , lowercase )
lowerCamelCase_ = self.convert_ids_to_tokens(lowercase , skip_special_tokens=lowercase )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase_ = []
lowerCamelCase_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowercase ) )
lowerCamelCase_ = []
sub_texts.append(lowercase )
else:
current_sub_text.append(lowercase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowercase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase_ = "".join(lowercase )
lowerCamelCase_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase_ = self.clean_up_tokenization(lowercase )
return clean_text
else:
return text
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
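# XLNet appends its special tokens at the end: a single sequence becomes
# `A <sep> <cls>` and a pair becomes `A <sep> B <sep> <cls>`, which is what the two
# return statements above build.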
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
if token_ids_a is not None:
return ([0] * len(lowercase )) + [1] + ([0] * len(lowercase )) + [1, 1]
return ([0] * len(lowercase )) + [1, 1]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase_ = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , "wb" ) as fi:
lowerCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
| 19 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase_ = 1_9_2
lowerCamelCase_ = 7_6_8
lowerCamelCase_ = 1_2
lowerCamelCase_ = 3
lowerCamelCase_ = [8_0_0, 1_3_3_3]
lowerCamelCase_ = False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = 3_3_0
lowerCamelCase_ = 1_4
lowerCamelCase_ = 6
lowerCamelCase_ = 1_3_2_0
elif "yolos_s" in yolos_name:
lowerCamelCase_ = 3_8_4
lowerCamelCase_ = 1_5_3_6
lowerCamelCase_ = 1_2
lowerCamelCase_ = 6
elif "yolos_b" in yolos_name:
lowerCamelCase_ = [8_0_0, 1_3_4_4]
lowerCamelCase_ = 9_1
lowerCamelCase_ = "huggingface/label-files"
lowerCamelCase_ = "coco-detection-id2label.json"
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowerCamelCase_ = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
lowerCamelCase_ = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase_ = in_proj_bias[: config.hidden_size]
lowerCamelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase_ = in_proj_weight[-config.hidden_size :, :]
lowerCamelCase_ = in_proj_bias[-config.hidden_size :]
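# The original timm-style checkpoint stores the attention projections as one fused
# qkv matrix per block; the function above slices that matrix (and its bias) into
# separate query, key and value projections of hidden_size rows each, which is the
# layout expected by the Hugging Face YOLOS implementation.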
def lowerCamelCase_ ( lowerCamelCase__ ):
if "backbone" in name:
lowerCamelCase_ = name.replace("backbone" , "vit" )
if "cls_token" in name:
lowerCamelCase_ = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowerCamelCase_ = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowerCamelCase_ = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowerCamelCase_ = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowerCamelCase_ = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCamelCase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase_ = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowerCamelCase_ = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowerCamelCase_ = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowerCamelCase_ = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
for key in orig_state_dict.copy().keys():
lowerCamelCase_ = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowerCamelCase_ = key.split("." )
lowerCamelCase_ = int(key_split[2] )
lowerCamelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase_ = val[:dim, :]
lowerCamelCase_ = val[
dim : dim * 2, :
]
lowerCamelCase_ = val[-dim:, :]
else:
lowerCamelCase_ = val[:dim]
lowerCamelCase_ = val[dim : dim * 2]
lowerCamelCase_ = val[-dim:]
else:
lowerCamelCase_ = val
return orig_state_dict
def lowerCamelCase_ ( ):
lowerCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase_ = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
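# The converted model is sanity-checked on the standard COCO image of two cats
# (000000039769.jpg): a few logits and predicted boxes are compared against values
# recorded from the original checkpoints further below.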
@torch.no_grad()
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
lowerCamelCase_ = get_yolos_config(lowerCamelCase__ )
# load original state_dict
lowerCamelCase_ = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]
# load 🤗 model
lowerCamelCase_ = YolosForObjectDetection(lowerCamelCase__ )
model.eval()
lowerCamelCase_ = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase_ = 8_0_0 if yolos_name != "yolos_ti" else 5_1_2
lowerCamelCase_ = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ )
lowerCamelCase_ = image_processor(images=prepare_img() , return_tensors="pt" )
lowerCamelCase_ = model(**lowerCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = outputs.logits, outputs.pred_boxes
lowerCamelCase_ , lowerCamelCase_ = None, None
if yolos_name == "yolos_ti":
lowerCamelCase_ = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
lowerCamelCase_ = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase_ = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
lowerCamelCase_ = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase_ = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
lowerCamelCase_ = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
lowerCamelCase_ = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
lowerCamelCase_ = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
lowerCamelCase_ = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F'Unknown yolos_name: {yolos_name}' )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
lowerCamelCase_ = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCamelCase_ = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" )
model.push_to_hub(lowerCamelCase__ , organization="hustvl" )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__A =parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 19 | 1 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1_3_3_7 , num_examples=4_2 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = split_dict._to_yaml_list()
assert len(lowerCamelCase__ ) == len(lowerCamelCase__ )
lowerCamelCase_ = SplitDict._from_yaml_list(lowerCamelCase__ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
lowerCamelCase_ = None
# the split name of split_dict takes over the name of the split info object
lowerCamelCase_ = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info" , [SplitInfo(), SplitInfo(dataset_name=lowerCamelCase__ ), SplitInfo(dataset_name="my_dataset" )] )
def lowerCamelCase_ ( lowerCamelCase__ ):
# For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
# field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
lowerCamelCase_ = asdict(SplitDict({"train": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 19 |
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = [0 for i in range(r + 1 )]
# nc0 = 1
lowerCamelCase_ = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
lowerCamelCase_ = min(lowerCamelCase__ , lowerCamelCase__ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
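# A space-optimised Pascal's-triangle computation: the list c holds one row at a
# time (the masked assignment near the top presumably sets c[0] = 1). Updating c[j]
# from j = min(i, r) down to 1 means each c[j - 1] read still comes from the
# previous row, so C(n, r) is obtained with only O(r) extra memory.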
print(binomial_coefficient(n=1_0, r=5))
| 19 | 1 |
from __future__ import annotations
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
if b == 0:
return (1, 0)
((lowerCamelCase_) , (lowerCamelCase_)) = extended_euclid(lowerCamelCase__ , a % b )
lowerCamelCase_ = a // b
return (y, x - k * y)
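# extended_euclid(a, b) is intended to return Bezout coefficients (x, y) with
# a*x + b*y == gcd(a, b); the masked unpacking above corresponds to the usual
# recursive step (x, y) = extended_euclid(b, a % b).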
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
((lowerCamelCase_) , (lowerCamelCase_)) = extended_euclid(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = na * na
lowerCamelCase_ = ra * x * na + ra * y * na
return (n % m + m) % m
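# Chinese remainder theorem: for coprime moduli n1, n2 and Bezout coefficients
# x, y with n1*x + n2*y == 1, the combination r2*x*n1 + r1*y*n2 (which the masked
# expression above presumably encodes) satisfies n mod n1 == r1 and
# n mod n2 == r2, and is unique modulo n1*n2.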
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
((lowerCamelCase_) , (lowerCamelCase_)) = extended_euclid(lowerCamelCase__ , lowerCamelCase__ )
if b < 0:
lowerCamelCase_ = (b % n + n) % n
return b
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ , lowerCamelCase_ = invert_modulo(lowerCamelCase__ , lowerCamelCase__ ), invert_modulo(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ = na * na
lowerCamelCase_ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 19 |
import math
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
if 0 not in (x, y):
# Compare powers via logarithms: log10(x^y) = y * log10(x), so returning y*log10(x) preserves the ordering of x^y.
return y * math.logaa(lowerCamelCase__ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
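# For positive bases, comparing the y * log10(x) values preserves the ordering of
# the powers x**y, so the caller can decide which power is larger without ever
# computing the (potentially huge) integers themselves.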
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
__A ='''Enter the base and the power separated by a comma: '''
__A, __A =map(int, input(prompt).split(''','''))
__A, __A =map(int, input(prompt).split(''','''))
# We find the log of each number, using the function res(), which takes two
# arguments.
__A =res(xa, ya)
__A =res(xa, ya)
# We check for the largest number
if resa > resa:
print('''Largest number is''', xa, '''^''', ya)
elif resa > resa:
print('''Largest number is''', xa, '''^''', ya)
else:
print('''Both are equal''')
| 19 | 1 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
def __init__( self , *lowercase , lowercase=None , lowercase=None , **lowercase ) -> List[str]:
super().__init__(*lowercase , **lowercase )
lowerCamelCase_ = eval_examples
lowerCamelCase_ = post_process_function
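# The evaluate/predict overrides below mirror the standard loops but, when a
# post_process_function is provided, first map the raw generated predictions back
# onto the original examples before metrics are computed.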
def SCREAMING_SNAKE_CASE_( self , lowercase = None , lowercase=None , lowercase = None , lowercase = "eval" , **lowercase , ) -> Dict[str, float]:
lowerCamelCase_ = gen_kwargs.copy()
lowerCamelCase_ = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
lowerCamelCase_ = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
lowerCamelCase_ = gen_kwargs
lowerCamelCase_ = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCamelCase_ = self.get_eval_dataloader(lowercase )
lowerCamelCase_ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = time.time()
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase_ = eval_loop(
lowercase , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowerCamelCase_ = self.post_process_function(lowercase , lowercase , lowercase )
lowerCamelCase_ = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase_ = metrics.pop(lowercase )
metrics.update(output.metrics )
else:
lowerCamelCase_ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCamelCase_ = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase )
return metrics
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=None , lowercase = "test" , **lowercase ) -> Union[str, Any]:
lowerCamelCase_ = gen_kwargs.copy()
lowerCamelCase_ = self.get_test_dataloader(lowercase )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase_ = self.compute_metrics
lowerCamelCase_ = None
lowerCamelCase_ = time.time()
lowerCamelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase_ = eval_loop(
lowercase , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
finally:
lowerCamelCase_ = compute_metrics
lowerCamelCase_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCamelCase_ = self.post_process_function(lowercase , lowercase , lowercase , "predict" )
lowerCamelCase_ = self.compute_metrics(lowercase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowerCamelCase_ = metrics.pop(lowercase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase )
| 19 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
__A =logging.get_logger(__name__)
__A =list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__A =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _SCREAMING_SNAKE_CASE :
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Model type selected in the list: ' + ', '.join(snake_case_ )} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase__ = field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase__ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase__ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase__ = field(
default=snake_case_ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase__ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase__ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase__ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 'train'
lowerCAmelCase__ = 'dev'
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , lowercase , lowercase , lowercase = None , lowercase = Split.train , lowercase = False , lowercase = None , lowercase = "pt" , ) -> List[str]:
lowerCamelCase_ = args
lowerCamelCase_ = is_language_sensitive
lowerCamelCase_ = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowercase , lowercase ):
try:
lowerCamelCase_ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
lowerCamelCase_ = mode
# Load data features from cache or dataset file
lowerCamelCase_ = "v2" if args.version_2_with_negative else "v1"
lowerCamelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ = cached_features_file + ".lock"
with FileLock(lowercase ):
if os.path.exists(lowercase ) and not args.overwrite_cache:
lowerCamelCase_ = time.time()
lowerCamelCase_ = torch.load(lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ = self.old_features["features"]
lowerCamelCase_ = self.old_features.get("dataset" , lowercase )
lowerCamelCase_ = self.old_features.get("examples" , lowercase )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
lowerCamelCase_ = self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ = self.processor.get_train_examples(args.data_dir )
lowerCamelCase_ , lowerCamelCase_ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=lowercase , )
lowerCamelCase_ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self , lowercase ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
lowerCamelCase_ = self.features[i]
lowerCamelCase_ = torch.tensor(feature.input_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.attention_mask , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.token_type_ids , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.cls_index , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.p_mask , dtype=torch.float )
lowerCamelCase_ = torch.tensor(feature.is_impossible , dtype=torch.float )
lowerCamelCase_ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ = torch.tensor(feature.start_position , dtype=torch.long )
lowerCamelCase_ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 19 | 1 |
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if principal <= 0:
raise Exception("Principal borrowed must be > 0" )
if rate_per_annum < 0:
raise Exception("Rate of interest must be >= 0" )
if years_to_repay <= 0 or not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise Exception("Years to repay must be an integer > 0" )
# Yearly rate is divided by 12 to get monthly rate
lowerCamelCase_ = rate_per_annum / 1_2
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowerCamelCase_ = years_to_repay * 1_2
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
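# Standard amortised-loan (EMI) formula:
#   payment = P * r * (1 + r)**n / ((1 + r)**n - 1)
# where P is the principal, r the monthly interest rate and n the number of
# monthly payments.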
if __name__ == "__main__":
import doctest
doctest.testmod()
| 19 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE_( lowercase ) -> int:
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
raise NotImplementedError()
| 19 | 1 |