from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # replace pad token ids in the labels by -100 so they are ignored by the loss
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
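# Note on the -100 label masking above (a minimal illustration, not part of the original
# test): PyTorch's CrossEntropyLoss ignores target positions whose label equals -100
# (its default `ignore_index`), so padded target tokens do not contribute to the loss:
#
#     import torch
#     loss_fct = torch.nn.CrossEntropyLoss()   # ignore_index defaults to -100
#     logits = torch.randn(3, 10)              # 3 positions, vocabulary of 10
#     labels = torch.tensor([4, -100, 7])      # middle position is padding -> ignored
#     loss = loss_fct(logits, labels)          # averaged over the 2 real positions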
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Return base**exponent % modulo_value via recursive exponentiation by squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Project Euler 188: return the last `digits` digits of the hyperexponentiation
    (tetration) of `base` by `height`, i.e. base↑↑height."""
    # evaluate the right-associative power tower by repeated modular exponentiation
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
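    # Sanity check (added; not part of the original solution): `_modexpt` should agree
    # with Python's built-in three-argument pow, e.g. for 2**10 % 1000:
    assert _modexpt(2, 10, 1000) == pow(2, 10, 1000) == 24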
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    """Round the requested image size down to the nearest multiple of the latent grid."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
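# Worked example (illustrative, not from the original source): with the default MoVQ
# scale factor of 8 and height = width = 768 as in the docstring example above,
# 768 // 8**2 = 12 with no remainder, so the function returns (96, 96) -- the spatial
# size of the latent grid handed to the UNet (96 / 8 = 12 latent cells per side).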
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation with ControlNet guidance, using Kandinsky 2.2.

    Args:
        scheduler ([`DDPMScheduler`]): A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]): MoVQ decoder to generate the image from the latents.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""Offload all models to CPU via accelerate, significantly reducing memory usage."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""Offload all models to CPU, moving one whole model at a time onto the GPU when used."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
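# Alternative sketch (not in the original script): tweepy's Cursor helper can do the
# max_id bookkeeping above automatically, assuming the same authenticated `api` object:
#
#     for tweet in tweepy.Cursor(api.user_timeline, screen_name="FirePing32", count=200).items():
#         print(tweet.id_str)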
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
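# Usage note (illustrative, not part of this __init__): once the lazy module replaces
# this package in sys.modules, importing transformers stays cheap; the torch-backed
# submodule is only loaded when one of the exported names is first accessed, e.g.:
#
#     from transformers import XmodConfig, XmodModel  # triggers the real import lazily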
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    r"""
    Constructs a ViLT processor which wraps a BERT tokenizer and a ViLT image processor into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
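# Usage sketch (illustrative; assumes the public "dandelin/vilt-b32-finetuned-vqa"
# checkpoint and a PIL image named `image`):
#
#     from transformers import ViltProcessor
#     processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#     encoding = processor(images=image, text="How many cats are there?", return_tensors="pt")
#     # -> input_ids / attention_mask from the tokenizer plus pixel_values / pixel_mask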
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
_CITATION = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate the intersection and union areas between one predicted and one
    ground-truth segmentation map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
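# Tiny worked example (added for illustration): with num_labels=2, pred_label=[0, 1, 1]
# and label=[0, 1, 0] (nothing ignored), the intersection histogram is [1, 1], the
# prediction and label areas are [1, 2] and [2, 1], so the union is [2, 2] and the
# per-category IoU computed downstream is [0.5, 0.5].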
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersection and union areas over a list of segmentation maps."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: int,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute mean IoU, mean accuracy, overall accuracy, and the per-category metrics."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels,
        ignore_index,
        nan_to_num=None,
        label_map=None,
        reduce_labels=False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
from __future__ import annotations


def mean(nums: list) -> float:
    """
    Return the arithmetic mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """Project Euler 173: count the square laminae that can be formed
    using up to `limit` square tiles."""
    answer = 0

    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
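    # Sanity check (added; figure taken from the Project Euler 173 statement, which says
    # that up to one hundred tiles allow forty-one different square laminae):
    assert solution(100) == 41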
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    # the original DialoGPT checkpoints store the LM head under `lm_head.decoder.weight`,
    # while transformers expects `lm_head.weight`
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
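# Usage sketch (illustrative): with the original DialoGPT fine-tuned pickles
# (small_ft.pkl, medium_ft.pkl, large_ft.pkl) in the current directory, running this
# script writes transformers-compatible weight files under ./DialoGPT-<size>/.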
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
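# Note (added): `batch_step_no_noise`, exercised in test_batch_step_no_noise above, steps
# a flattened batch of (sample, timestep) pairs in a single call -- this is what lets
# parallel sampling schemes evaluate several timesteps of the DDIM update at once.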
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase_ ( enum.Enum ):
"""simple docstring"""
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : Optional[Any] = 2
@add_end_docstrings(__lowercase )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self , *_a , **_a ) -> List[str]:
super().__init__(*_a , **_a )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_a : Dict = None
if self.model.config.prefix is not None:
_a : List[Any] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_a : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_a , _a , _a : str = self._sanitize_parameters(prefix=_a , **self._forward_params )
_a : Optional[Any] = {**self._preprocess_params, **preprocess_params}
_a : List[Any] = {**self._forward_params, **forward_params}
def __lowercase ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> Optional[int]:
_a : List[Any] = {}
if prefix is not None:
_a : Optional[Any] = prefix
if prefix:
_a : Dict = self.tokenizer(
_a , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Tuple = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
_a : Dict = handle_long_generation
preprocess_params.update(_a )
_a : Tuple = generate_kwargs
_a : Any = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
_a : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
_a : Any = ReturnType.TENSORS
if return_type is not None:
_a : Any = return_type
if clean_up_tokenization_spaces is not None:
_a : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
_a : Tuple = self.tokenizer.encode(_a , add_special_tokens=_a )
if len(_a ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
_a : List[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowercase ( self , *_a , **_a ) -> Union[str, Any]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_a , **_a )
def __call__( self , _a , **_a ) -> List[str]:
return super().__call__(_a , **_a )
def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]:
_a : Optional[int] = self.tokenizer(
prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Union[str, Any] = prompt_text
if handle_long_generation == "hole":
_a : List[str] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_a : int = generate_kwargs['''max_new_tokens''']
else:
_a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_a : List[str] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
_a : List[Any] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_a : List[str] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def __lowercase ( self , _a , **_a ) -> Optional[int]:
_a : Any = model_inputs['''input_ids''']
_a : Optional[Any] = model_inputs.get('''attention_mask''' , _a )
# Allow empty prompts
if input_ids.shape[1] == 0:
_a : int = None
_a : int = None
_a : List[str] = 1
else:
_a : List[Any] = input_ids.shape[0]
_a : Union[str, Any] = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_a : int = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
_a : Tuple = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
_a : int = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_a : Dict = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_a : Optional[Any] = self.model.generate(input_ids=_a , attention_mask=_a , **_a )
_a : int = generated_sequence.shape[0]
if self.framework == "pt":
_a : Tuple = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_a : List[Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowercase ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> int:
_a : Tuple = model_outputs['''generated_sequence'''][0]
_a : int = model_outputs['''input_ids''']
_a : Any = model_outputs['''prompt_text''']
_a : Any = generated_sequence.numpy().tolist()
_a : Any = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_a : Optional[int] = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_a : str = self.tokenizer.decode(
_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_a : Union[str, Any] = 0
else:
_a : str = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) )
if return_type == ReturnType.FULL_TEXT:
_a : str = prompt_text + text[prompt_length:]
else:
_a : List[str] = text[prompt_length:]
_a : Union[str, Any] = {'''generated_text''': all_text}
records.append(_a )
return records
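# Hedged usage sketch for the text-generation pipeline implemented above; the
# model name and the generation settings below are illustrative assumptions.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    # return_full_text=False corresponds to ReturnType.NEW_TEXT, so the prompt is
    # stripped from the output; handle_long_generation="hole" keeps only the
    # trailing context when the prompt plus the requested new tokens would exceed
    # the model max length.
    outputs = generator(
        "Once upon a time",
        max_new_tokens=20,
        return_full_text=False,
        handle_long_generation="hole",
    )
    print(outputs[0]["generated_text"])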
| 15 | 1 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
a__ = '''pytorch_model.bin'''
a__ = '''pytorch_model.bin.index.json'''
a__ = '''adapter_config.json'''
a__ = '''adapter_model.bin'''
a__ = '''adapter_model.safetensors'''
a__ = '''tf_model.h5'''
a__ = '''tf_model.h5.index.json'''
a__ = '''model.ckpt'''
a__ = '''flax_model.msgpack'''
a__ = '''flax_model.msgpack.index.json'''
a__ = '''model.safetensors'''
a__ = '''model.safetensors.index.json'''
a__ = '''config.json'''
a__ = '''preprocessor_config.json'''
a__ = FEATURE_EXTRACTOR_NAME
a__ = '''generation_config.json'''
a__ = '''modelcard.json'''
a__ = '''▁'''
a__ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
a__ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
a__ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
a__ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def __UpperCAmelCase ( __a : int ) -> Tuple:
"""simple docstring"""
    if version.parse(__version__ ) < version.parse(min_version ):
if "dev" in min_version:
_a : Tuple = (
'''This example requires a source install from HuggingFace Transformers (see '''
'''`https://huggingface.co/docs/transformers/installation#install-from-source`),'''
)
else:
_a : List[str] = F"""This example requires a minimum version of {min_version},"""
error_message += F""" but the version found is {__version__}.\n"""
raise ImportError(
error_message
+ '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '''
'''versions of HuggingFace Transformers.''' )
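# Self-contained sketch of the version-guard pattern implemented above: compare
# the installed `__version__` against a required minimum and fail fast. The
# helper name and the version string in the usage note are illustrative
# assumptions.
def _require_min_version(current: str, minimum: str) -> None:
    # Raise early with a clear message instead of an obscure failure later on.
    if version.parse(current) < version.parse(minimum):
        raise ImportError(f"this example needs transformers >= {minimum}, found {current}")

# Usage (illustrative): _require_min_version(__version__, "4.21.0")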
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( __a : Dict=None ) -> str:
"""simple docstring"""
if subparsers is not None:
_a : Union[str, Any] = subparsers.add_parser('''test''' )
else:
_a : List[str] = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' ,default=__a ,help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) ,)
if subparsers is not None:
parser.set_defaults(func=__a )
return parser
def __UpperCAmelCase ( __a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Dict = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
_a : List[Any] = script_name
else:
_a : Union[str, Any] = F"""--config_file={args.config_file} {script_name}"""
_a : str = ['''accelerate-launch'''] + test_args.split()
_a : str = execute_subprocess_async(__a ,env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = test_command_parser()
_a : List[Any] = parser.parse_args()
test_command(__a )
if __name__ == "__main__":
main()
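# Hedged, self-contained sketch of the same subcommand pattern: register a
# subparser, bind a handler via `set_defaults(func=...)`, then dispatch. The
# names below are illustrative assumptions, not part of accelerate's API.
def _demo_subcommand_cli():
    parser = argparse.ArgumentParser(prog="mycli")
    subparsers = parser.add_subparsers(dest="command", required=True)
    test_parser = subparsers.add_parser("test")
    test_parser.add_argument("--config_file", default=None)
    test_parser.set_defaults(func=lambda args: print(f"config: {args.config_file}"))
    args = parser.parse_args(["test", "--config_file", "cfg.yaml"])
    args.func(args)  # -> prints "config: cfg.yaml"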
| 15 | 1 |
import math
def __UpperCAmelCase ( __a : float ,__a : float ) -> float:
"""simple docstring"""
if (
not isinstance(__a ,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * power_factor
def __UpperCAmelCase ( __a : float ,__a : float ) -> float:
"""simple docstring"""
if (
not isinstance(__a ,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
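# Worked example for the two helpers above (apparent power S = 100 VA at power
# factor 0.8): real power = S * pf = 80 W and reactive power =
# S * sqrt(1 - pf**2) = 60 VAr, closing the 3-4-5 power triangle
# (80**2 + 60**2 == 100**2).
if __name__ == "__main__":
    _s, _pf = 100.0, 0.8
    print(_s * _pf)  # ~80.0 (real power, W)
    print(_s * math.sqrt(1 - _pf**2))  # ~60.0 (reactive power, VAr)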
| 15 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[Any] = tempfile.mkdtemp()
# fmt: off
_a : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_a : Any = {
'''do_resize''': True,
'''size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_a : str = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __lowercase ( self , **_a ) -> Any:
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , **_a ) -> str:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_a : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self ) -> str:
_a : List[str] = self.get_tokenizer()
_a : Tuple = self.get_image_processor()
_a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Dict:
_a : List[str] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a : List[Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Any:
_a : Dict = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : int = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[str] = self.prepare_image_inputs()
_a : List[Any] = image_processor(_a , return_tensors='''np''' )
_a : Dict = processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = self.get_tokenizer()
_a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Tuple = '''lower newer'''
_a : int = processor(text=_a )
_a : str = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self ) -> List[Any]:
_a : Any = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : Tuple = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[Any] = '''lower newer'''
_a : Union[str, Any] = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_a ):
processor()
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Any = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : int = processor.batch_decode(_a )
_a : int = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __lowercase ( self ) -> List[Any]:
_a : Tuple = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Optional[int] = '''lower newer'''
_a : Dict = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
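# Hedged usage sketch mirroring the tests above; the tokenizer checkpoint and
# image size are illustrative assumptions:
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   image_processor = ViTImageProcessor(size={"height": 224, "width": 224})
#   processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   batch = processor(text="a photo of a cat", images=image, return_tensors="np")
#   # -> keys: input_ids, token_type_ids, attention_mask, pixel_values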
| 15 | 1 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : str ) -> Optional[int]:
"""simple docstring"""
_a : str = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
_a : int = MaskFormerConfig(backbone_config=__a )
_a : Union[str, Any] = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
_a : Union[str, Any] = 847
_a : str = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
_a : int = 150
_a : Optional[int] = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
_a : Optional[int] = 171
_a : Optional[Any] = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
_a : Dict = 133
_a : str = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
_a : Optional[Any] = 19
_a : Optional[int] = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
_a : List[str] = 65
_a : List[str] = '''mapillary-vistas-id2label.json'''
_a : List[Any] = json.load(open(hf_hub_download(__a ,__a ,repo_type='''dataset''' ) ,'''r''' ) )
_a : List[str] = {int(__a ): v for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
_a : Tuple = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def __UpperCAmelCase ( __a : Any ,__a : Tuple ,__a : str ) -> List[Any]:
"""simple docstring"""
_a : Any = dct.pop(__a )
_a : int = val
def __UpperCAmelCase ( __a : Optional[int] ,__a : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_a : Union[str, Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_a : Union[str, Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_a : Any = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_a : List[str] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : int = in_proj_weight[:dim, :]
_a : int = in_proj_bias[: dim]
_a : Tuple = in_proj_weight[
dim : dim * 2, :
]
_a : Optional[Any] = in_proj_bias[
dim : dim * 2
]
_a : List[str] = in_proj_weight[
-dim :, :
]
_a : Dict = in_proj_bias[-dim :]
# fmt: on
def __UpperCAmelCase ( __a : Tuple ,__a : Tuple ) -> Dict:
"""simple docstring"""
_a : Union[str, Any] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : Optional[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_a : List[str] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : str = in_proj_weight[: hidden_size, :]
_a : Dict = in_proj_bias[:config.hidden_size]
_a : Optional[int] = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : List[str] = in_proj_bias[hidden_size : hidden_size * 2]
_a : Dict = in_proj_weight[-hidden_size :, :]
_a : List[str] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_a : str = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : List[Any] = in_proj_weight[: hidden_size, :]
_a : Any = in_proj_bias[:config.hidden_size]
_a : Any = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : List[str] = in_proj_bias[hidden_size : hidden_size * 2]
_a : int = in_proj_weight[-hidden_size :, :]
_a : List[Any] = in_proj_bias[-hidden_size :]
# fmt: on
def __UpperCAmelCase ( ) -> torch.Tensor:
"""simple docstring"""
_a : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a : Optional[int] = Image.open(requests.get(__a ,stream=__a ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __a : str ,__a : str ,__a : str ,__a : bool = False ) -> Optional[Any]:
"""simple docstring"""
_a : Union[str, Any] = get_maskformer_config(__a )
# load original state_dict
with open(__a ,'''rb''' ) as f:
_a : int = pickle.load(__a )
_a : Union[str, Any] = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_a : Tuple = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a ,__a ,__a )
read_in_swin_q_k_v(__a ,config.backbone_config )
read_in_decoder_q_k_v(__a ,__a )
# update to torch tensors
for key, value in state_dict.items():
_a : Optional[Any] = torch.from_numpy(__a )
# load 🤗 model
_a : Union[str, Any] = MaskFormerForInstanceSegmentation(__a )
model.eval()
for name, param in model.named_parameters():
print(__a ,param.shape )
_a , _a : List[Any] = model.load_state_dict(__a ,strict=__a )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
    assert len(unexpected_keys ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_a : int = prepare_img()
if "vistas" in model_name:
_a : Optional[Any] = 65
elif "cityscapes" in model_name:
_a : List[str] = 65_535
else:
_a : Tuple = 255
_a : int = True if '''ade''' in model_name else False
_a : Optional[int] = MaskFormerImageProcessor(ignore_index=__a ,reduce_labels=__a )
_a : List[str] = image_processor(__a ,return_tensors='''pt''' )
_a : Dict = model(**__a )
print('''Logits:''' ,outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_a : Dict = torch.tensor(
[[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__a ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
        help='''Name of the MaskFormer model you\'d like to convert''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a__ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
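# Minimal sketch of the rename-then-move pattern the conversion above is built
# on: collect (old_key, new_key) pairs, then pop each tensor and re-insert it
# under its new name; this is the same operation rename_key performs one pair
# at a time.
def _apply_renames(state_dict, rename_pairs):
    for old_key, new_key in rename_pairs:
        state_dict[new_key] = state_dict.pop(old_key)
    return state_dict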
| 15 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
a__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : Optional[int] ,__a : List[str] ,__a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for attribute in key.split('''.''' ):
_a : Optional[Any] = getattr(__a ,__a )
if weight_type is not None:
_a : Dict = getattr(__a ,__a ).shape
else:
_a : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_a : List[Any] = value
elif weight_type == "weight_g":
_a : Any = value
elif weight_type == "weight_v":
_a : Union[str, Any] = value
elif weight_type == "bias":
_a : Optional[int] = value
else:
_a : List[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __UpperCAmelCase ( __a : Any ,__a : Union[str, Any] ,__a : Union[str, Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = []
_a : Union[str, Any] = fairseq_model.state_dict()
_a : Union[str, Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_a : int = False
if "conv_layers" in name:
load_conv_layer(
__a ,__a ,__a ,__a ,hf_model.config.feat_extract_norm == '''group''' ,)
_a : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
_a : Union[str, Any] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
_a : Any = True
if "*" in mapped_key:
_a : Optional[int] = name.split(__a )[0].split('''.''' )[-2]
_a : Any = mapped_key.replace('''*''' ,__a )
if "weight_g" in name:
_a : List[Any] = '''weight_g'''
elif "weight_v" in name:
_a : List[str] = '''weight_v'''
elif "weight" in name:
_a : Any = '''weight'''
elif "bias" in name:
_a : str = '''bias'''
else:
_a : Any = None
set_recursively(__a ,__a ,__a ,__a ,__a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __UpperCAmelCase ( __a : int ,__a : Optional[Any] ,__a : Dict ,__a : List[str] ,__a : Any ) -> Tuple:
"""simple docstring"""
_a : int = full_name.split('''conv_layers.''' )[-1]
_a : Any = name.split('''.''' )
_a : List[Any] = int(items[0] )
_a : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_a : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_a : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_a : int = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_a : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def __UpperCAmelCase ( __a : Dict ,__a : List[Any] ,__a : List[str]=None ,__a : Optional[int]=None ,__a : int=True ) -> List[Any]:
"""simple docstring"""
if config_path is not None:
_a : Tuple = HubertConfig.from_pretrained(__a )
else:
_a : Any = HubertConfig()
if is_finetuned:
if dict_path:
_a : Tuple = Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_a : Any = target_dict.pad_index
_a : Tuple = target_dict.bos_index
_a : Optional[int] = target_dict.eos_index
_a : Optional[Any] = len(target_dict.symbols )
_a : Tuple = os.path.join(__a ,'''vocab.json''' )
if not os.path.isdir(__a ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__a ) )
return
os.makedirs(__a ,exist_ok=__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices ,__a )
_a : Tuple = WavaVecaCTCTokenizer(
__a ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=__a ,)
_a : Tuple = True if config.feat_extract_norm == '''layer''' else False
_a : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__a ,return_attention_mask=__a ,)
_a : List[Any] = WavaVecaProcessor(feature_extractor=__a ,tokenizer=__a )
processor.save_pretrained(__a )
_a : Tuple = HubertForCTC(__a )
else:
_a : Tuple = HubertModel(__a )
if is_finetuned:
_a , _a , _a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_a , _a , _a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_a : Any = model[0].eval()
recursively_load_weights(__a ,__a ,__a )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
a__ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
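# Hedged sketch of the dotted-path assignment used by set_recursively above:
# walk a key like "encoder.layers.0.attention.k_proj" with getattr, optionally
# descend into weight/bias, check shapes, then overwrite the tensor data.
def _assign_by_path(model, dotted_key, value, weight_type=None):
    pointer = model
    for attribute in dotted_key.split("."):
        pointer = getattr(pointer, attribute)
    if weight_type is not None:
        pointer = getattr(pointer, weight_type)
    assert pointer.shape == value.shape  # fail loudly on layout mismatches
    pointer.data = value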
| 15 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a__ = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
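# Hedged sketch of the deferred-import behavior _LazyModule provides: with a
# module-level __getattr__ (PEP 562), a symbol is resolved from its submodule
# only on first access. This is an illustration of the idea, not transformers'
# actual implementation.
# import importlib
#
# def __getattr__(name):
#     for module_name, symbols in _import_structure.items():
#         if name in symbols:
#             return getattr(importlib.import_module(f".{module_name}", __name__), name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")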
| 15 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = ["image_processor", "tokenizer"]
UpperCAmelCase__ : str = "ViltImageProcessor"
UpperCAmelCase__ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _a=None , _a=None , **_a ) -> Any:
_a : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
_a : Dict = kwargs.pop('''feature_extractor''' )
_a : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
_a : int = self.image_processor
def __call__( self , _a , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
_a : Tuple = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel_values + pixel_mask
_a : str = self.image_processor(_a , return_tensors=_a )
encoding.update(_a )
return encoding
def __lowercase ( self , *_a , **_a ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_a , **_a )
def __lowercase ( self , *_a , **_a ) -> str:
return self.tokenizer.decode(*_a , **_a )
@property
def __lowercase ( self ) -> Optional[int]:
_a : str = self.tokenizer.model_input_names
_a : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowercase ( self ) -> Optional[Any]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __lowercase ( self ) -> Any:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
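# Hedged usage sketch for the processor above; the checkpoint is a real
# VQA-finetuned ViLT repo, used here purely as an illustration:
#   from transformers import ViltProcessor
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   encoding = processor(image, "How many cats are there?", return_tensors="pt")
#   # -> input_ids / token_type_ids / attention_mask plus pixel_values + pixel_mask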
| 15 | 1 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
a__ = 0
a__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
a__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
a__ = tuple[int, int]
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a , _a , _a , _a , _a , ) -> None:
_a : Any = pos_x
_a : List[str] = pos_y
_a : Tuple = (pos_y, pos_x)
_a : List[str] = goal_x
_a : Union[str, Any] = goal_y
_a : Optional[Any] = g_cost
_a : Union[str, Any] = parent
_a : List[Any] = self.calculate_heuristic()
_a : int = self.g_cost + self.h_cost
def __lowercase ( self ) -> float:
_a : Optional[int] = self.pos_x - self.goal_x
_a : str = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(_a ) + abs(_a )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , _a ) -> bool:
return self.f_cost < other.f_cost
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a ) -> Dict:
_a : Any = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _a )
_a : Optional[int] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , _a )
_a : str = [self.start]
_a : list[Node] = []
_a : int = False
def __lowercase ( self ) -> list[TPosition]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_a : Any = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(_a )
self.closed_nodes.append(_a )
_a : Dict = self.get_successors(_a )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_a )
else:
# retrieve the best current path
_a : List[Any] = self.open_nodes.pop(self.open_nodes.index(_a ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_a )
else:
self.open_nodes.append(_a )
return [self.start.pos]
def __lowercase ( self , _a ) -> list[Node]:
_a : Tuple = []
for action in delta:
_a : Optional[int] = parent.pos_x + action[1]
_a : List[str] = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_a , _a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _a , ) )
return successors
def __lowercase ( self , _a ) -> list[TPosition]:
_a : str = node
_a : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_a : Any = current_node.parent
path.reverse()
return path
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a ) -> None:
_a : Union[str, Any] = AStar(_a , _a )
_a : List[Any] = AStar(_a , _a )
_a : Any = False
def __lowercase ( self ) -> list[TPosition]:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
_a : Dict = self.fwd_astar.open_nodes.pop(0 )
_a : Any = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
_a , _a )
self.fwd_astar.closed_nodes.append(_a )
self.bwd_astar.closed_nodes.append(_a )
_a : Dict = current_bwd_node
_a : Any = current_fwd_node
_a : Tuple = {
self.fwd_astar: self.fwd_astar.get_successors(_a ),
self.bwd_astar: self.bwd_astar.get_successors(_a ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(_a )
else:
# retrieve the best current path
_a : Tuple = astar.open_nodes.pop(
astar.open_nodes.index(_a ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(_a )
else:
astar.open_nodes.append(_a )
return [self.fwd_astar.start.pos]
def __lowercase ( self , _a , _a ) -> list[TPosition]:
_a : List[str] = self.fwd_astar.retrace_path(_a )
_a : int = self.bwd_astar.retrace_path(_a )
bwd_path.pop()
bwd_path.reverse()
_a : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
a__ = (0, 0)
a__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
a__ = time.time()
a__ = AStar(init, goal)
a__ = a_star.search()
a__ = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
a__ = time.time()
    a__ = BidirectionalAStar(init, goal)
    a__ = bidir_astar.search()  # run the search so the timing below measures real work (instance name reconstructed)
    a__ = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
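    # Worked heuristic check for the 7 x 7 grid above, start (0, 0) -> goal (6, 6):
    #   Manhattan: |0 - 6| + |0 - 6| = 12
    #   Euclidean: sqrt(6**2 + 6**2) ~ 8.49
    # Both underestimate the true 4-directional unit-cost path length, so A*
    # stays optimal with either HEURISTIC setting.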
| 15 |
from math import ceil
def __UpperCAmelCase ( __a : int = 1_001 ) -> int:
"""simple docstring"""
_a : Dict = 1
for i in range(1 ,int(ceil(n / 2.0 ) ) ):
_a : int = 2 * i + 1
_a : str = 2 * i
_a : Any = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
a__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
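# Cross-check of the closed form above: the four corners of the spiral ring
# with odd side length s are s**2, s**2 - (s - 1), s**2 - 2*(s - 1) and
# s**2 - 3*(s - 1), which sum to 4*s**2 - 6*(s - 1); that is exactly the
# per-ring term accumulated in solution().
def naive_diagonal_sum(n: int) -> int:
    total = 1
    for s in range(3, n + 1, 2):
        total += 4 * s * s - 6 * (s - 1)
    return total

# naive_diagonal_sum(5) == 101 == 1 + (3 + 5 + 7 + 9) + (13 + 17 + 21 + 25)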
| 15 | 1 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def __UpperCAmelCase ( __a : Tuple ,__a : Union[str, Any] ,__a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Dict = AutoConfig.from_pretrained(__a )
_a : Union[str, Any] = FlaxAutoModelForSeqaSeqLM.from_config(config=__a )
_a : Optional[Any] = checkpoints.load_tax_checkpoint(__a )
_a : Tuple = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp''']
if config.model_type == "t5":
_a : Optional[int] = '''SelfAttention'''
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_a : int = '''LocalSelfAttention'''
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a : Optional[Any] = '''TransientGlobalSelfAttention'''
else:
raise ValueError(
'''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'''
''' attribute with a value from [\'local\', \'transient-global].''' )
# Encoder
for layer_index in range(config.num_layers ):
_a : Optional[int] = F"""layers_{str(__a )}"""
# Self-Attention
_a : Optional[Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel''']
_a : Dict = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel''']
_a : Optional[Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel''']
_a : Dict = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel''']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a : Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale''']
# Layer Normalization
_a : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale''']
if split_mlp_wi:
_a : Optional[Any] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
_a : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
_a : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
_a : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
_a : str = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
_a : List[Any] = flax_model.params['''encoder''']['''block'''][str(__a )]['''layer''']
_a : Union[str, Any] = tax_attention_key
_a : int = tax_attention_out
_a : str = tax_attention_query
_a : Optional[Any] = tax_attention_value
_a : Union[str, Any] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a : Dict = tax_global_layer_norm
if split_mlp_wi:
_a : Any = tax_mlp_wi_a
_a : int = tax_mlp_wi_a
else:
_a : Dict = tax_mlp_wi
_a : int = tax_mlp_wo
_a : Tuple = tax_mlp_layer_norm
_a : List[Any] = flax_model_encoder_layer_block
# Only for layer 0:
_a : Tuple = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T
_a : Tuple = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_a : int = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T
_a : Optional[int] = tax_encoder_global_rel_embedding
# Assigning
_a : List[Any] = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale''']
_a : Optional[Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_a : str = F"""layers_{str(__a )}"""
# Self-Attention
_a : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel''']
_a : List[str] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel''']
_a : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel''']
_a : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel''']
# Layer Normalization
_a : str = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][
'''scale'''
]
# Encoder-Decoder-Attention
_a : Tuple = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention''']
_a : Optional[int] = tax_enc_dec_attention_module['''key''']['''kernel''']
_a : str = tax_enc_dec_attention_module['''out''']['''kernel''']
_a : List[Any] = tax_enc_dec_attention_module['''query''']['''kernel''']
_a : str = tax_enc_dec_attention_module['''value''']['''kernel''']
# Layer Normalization
_a : Any = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale''']
# MLP
if split_mlp_wi:
_a : str = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel''']
_a : Tuple = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel''']
else:
_a : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel''']
_a : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel''']
# Layer Normalization
_a : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale''']
# Assigning
_a : Optional[Any] = flax_model.params['''decoder''']['''block'''][str(__a )]['''layer''']
_a : List[Any] = tax_attention_key
_a : Optional[int] = tax_attention_out
_a : str = tax_attention_query
_a : Tuple = tax_attention_value
_a : List[str] = tax_pre_attention_layer_norm
_a : Any = tax_enc_dec_attention_key
_a : Dict = tax_enc_dec_attention_out
_a : Optional[Any] = tax_enc_dec_attention_query
_a : List[str] = tax_enc_dec_attention_value
_a : Dict = tax_cross_layer_norm
if split_mlp_wi:
_a : Optional[int] = tax_mlp_wi_a
_a : Dict = tax_mlp_wi_a
else:
_a : Optional[Any] = tax_mlp_wi
_a : Any = tax_mlp_wo
        _a : Optional[Any] = tax_mlp_layer_norm
_a : Union[str, Any] = flax_model_decoder_layer_block
# Decoder Normalization
_a : Tuple = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale''']
    _a : str = tax_decoder_norm
# Only for layer 0:
_a : Dict = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T
_a : str = tax_decoder_rel_embedding
# Token Embeddings
_a : Optional[Any] = tax_model['''target''']['''token_embedder''']['''embedding''']
    _a : Any = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_a : Any = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel''']
flax_model.save_pretrained(__a )
    print('''T5X Model was successfully converted!''' )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
a__ = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
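# Illustrative invocation (the script file name and all paths below are
# hypothetical placeholders; the config name is one example):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/t5-v1_1-small \
#       --flax_dump_folder_path /path/to/flax_dump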
| 15 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : str ,__a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
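# Worked example (hypothetical 200x100-pixel page): the word box
# [20, 10, 60, 30] maps to [100, 100, 300, 300] on the 0-1000 grid that
# LayoutLM-style models expect, since int(1000 * 20 / 200) = 100,
# int(1000 * 10 / 100) = 100, int(1000 * 60 / 200) = 300 and
# int(1000 * 30 / 100) = 300.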
def __UpperCAmelCase ( __a : np.ndarray ,__a : Optional[str] ,__a : Optional[str] ) -> List[Any]:
"""simple docstring"""
_a : str = to_pil_image(__a )
_a , _a : Optional[Any] = pil_image.size
_a : Tuple = pytesseract.image_to_data(__a ,lang=__a ,output_type='''dict''' ,config=__a )
_a , _a , _a , _a , _a : List[str] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
_a : Dict = [idx for idx, word in enumerate(__a ) if not word.strip()]
_a : str = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices]
_a : List[str] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : str = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_a : int = []
for x, y, w, h in zip(__a ,__a ,__a ,__a ):
_a : List[str] = [x, y, x + w, y + h]
actual_boxes.append(__a )
# finally, normalize the bounding boxes
_a : Dict = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(__a ,__a ,__a ) )
assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
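# Usage sketch (the image and language values are illustrative):
#
#   words, boxes = apply_tesseract(image, "eng", "")
#
# returns parallel lists with one OCR'd word and one 0-1000-normalized
# [left, top, right, bottom] box per word.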
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None:
super().__init__(**_a )
_a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_a : Union[str, Any] = get_size_dict(_a )
_a : int = do_resize
_a : Optional[int] = size
_a : str = resample
_a : str = do_rescale
_a : Any = rescale_value
_a : Optional[Any] = do_normalize
_a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
_a : List[Any] = apply_ocr
_a : Optional[int] = ocr_lang
_a : Tuple = tesseract_config
def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray:
_a : Any = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_a : Optional[int] = (size['''height'''], size['''width'''])
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_a : Optional[int] = do_resize if do_resize is not None else self.do_resize
_a : Union[str, Any] = size if size is not None else self.size
_a : Any = get_size_dict(_a )
_a : List[str] = resample if resample is not None else self.resample
_a : int = do_rescale if do_rescale is not None else self.do_rescale
_a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : int = do_normalize if do_normalize is not None else self.do_normalize
_a : str = image_mean if image_mean is not None else self.image_mean
_a : Tuple = image_std if image_std is not None else self.image_std
_a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
_a : int = ocr_lang if ocr_lang is not None else self.ocr_lang
_a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
_a : List[Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
_a : Any = [to_numpy_array(_a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
_a : str = []
_a : str = []
for image in images:
_a , _a : Union[str, Any] = apply_tesseract(_a , _a , _a )
words_batch.append(_a )
boxes_batch.append(_a )
if do_resize:
_a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
_a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images]
_a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a )
if apply_ocr:
_a : Optional[int] = words_batch
_a : List[Any] = boxes_batch
return data
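# Usage sketch (instance and image names are illustrative): calling the
# processor as `features = image_processor(images=image, return_tensors="np")`
# yields a BatchFeature with "pixel_values"; in the upstream implementation
# the OCR results are attached under the "words" and "boxes" keys when
# `apply_ocr` is enabled.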
| 15 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : jnp.ndarray
@flax_register_to_config
class UpperCAmelCase_ ( nn.Module , __lowercase , __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : int = 32
UpperCAmelCase__ : int = 4
UpperCAmelCase__ : int = 4
UpperCAmelCase__ : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCAmelCase__ : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
UpperCAmelCase__ : Union[bool, Tuple[bool]] = False
UpperCAmelCase__ : Tuple[int] = (320, 640, 1280, 1280)
UpperCAmelCase__ : int = 2
UpperCAmelCase__ : Union[int, Tuple[int]] = 8
UpperCAmelCase__ : Optional[Union[int, Tuple[int]]] = None
UpperCAmelCase__ : int = 1280
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : bool = False
def __lowercase ( self , _a ) -> FrozenDict:
# init input tensors
_a : Union[str, Any] = (1, self.in_channels, self.sample_size, self.sample_size)
_a : Tuple = jnp.zeros(_a , dtype=jnp.floataa )
_a : Union[str, Any] = jnp.ones((1,) , dtype=jnp.intaa )
_a : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_a , _a : Union[str, Any] = jax.random.split(_a )
_a : str = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_a , _a , _a , _a )["params"]
def __lowercase ( self ) -> Any:
_a : Tuple = self.block_out_channels
_a : int = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_a : Any = self.num_attention_heads or self.attention_head_dim
# input
_a : List[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_a : str = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_a : Optional[Any] = FlaxTimestepEmbedding(_a , dtype=self.dtype )
_a : List[Any] = self.only_cross_attention
if isinstance(_a , _a ):
_a : Dict = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_a , _a ):
_a : Union[str, Any] = (num_attention_heads,) * len(self.down_block_types )
# down
_a : Optional[Any] = []
_a : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
_a : Optional[Any] = output_channel
_a : Optional[int] = block_out_channels[i]
_a : List[Any] = i == len(_a ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_a : Union[str, Any] = FlaxCrossAttnDownBlockaD(
in_channels=_a , out_channels=_a , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_a : Any = FlaxDownBlockaD(
in_channels=_a , out_channels=_a , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_a )
_a : Union[str, Any] = down_blocks
# mid
_a : Union[str, Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
_a : Union[str, Any] = []
_a : List[Any] = list(reversed(_a ) )
_a : Union[str, Any] = list(reversed(_a ) )
_a : Tuple = list(reversed(_a ) )
_a : str = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
_a : List[Any] = output_channel
_a : Optional[Any] = reversed_block_out_channels[i]
_a : Optional[int] = reversed_block_out_channels[min(i + 1 , len(_a ) - 1 )]
_a : str = i == len(_a ) - 1
if up_block_type == "CrossAttnUpBlock2D":
_a : Tuple = FlaxCrossAttnUpBlockaD(
in_channels=_a , out_channels=_a , prev_output_channel=_a , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_a : List[Any] = FlaxUpBlockaD(
in_channels=_a , out_channels=_a , prev_output_channel=_a , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_a )
_a : Optional[int] = output_channel
_a : Dict = up_blocks
# out
_a : List[str] = nn.GroupNorm(num_groups=3_2 , epsilon=1e-5 )
_a : Dict = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _a , _a , _a , _a=None , _a=None , _a = True , _a = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(_a , jnp.ndarray ):
_a : Any = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_a , jnp.ndarray ) and len(timesteps.shape ) == 0:
_a : Optional[int] = timesteps.astype(dtype=jnp.floataa )
_a : str = jnp.expand_dims(_a , 0 )
_a : List[Any] = self.time_proj(_a )
_a : List[str] = self.time_embedding(_a )
# 2. pre-process
_a : List[Any] = jnp.transpose(_a , (0, 2, 3, 1) )
_a : str = self.conv_in(_a )
# 3. down
_a : Tuple = (sample,)
for down_block in self.down_blocks:
if isinstance(_a , _a ):
_a , _a : List[str] = down_block(_a , _a , _a , deterministic=not train )
else:
_a , _a : int = down_block(_a , _a , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
_a : str = ()
for down_block_res_sample, down_block_additional_residual in zip(
_a , _a ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
_a : List[str] = new_down_block_res_samples
# 4. mid
_a : Dict = self.mid_block(_a , _a , _a , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
_a : Optional[Any] = down_block_res_samples[-(self.layers_per_block + 1) :]
_a : str = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_a , _a ):
_a : List[str] = up_block(
_a , temb=_a , encoder_hidden_states=_a , res_hidden_states_tuple=_a , deterministic=not train , )
else:
_a : Dict = up_block(_a , temb=_a , res_hidden_states_tuple=_a , deterministic=not train )
# 6. post-process
_a : Tuple = self.conv_norm_out(_a )
_a : int = nn.silu(_a )
_a : Optional[Any] = self.conv_out(_a )
_a : List[Any] = jnp.transpose(_a , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_a )
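# Shape sketch (values are the config defaults above, chosen purely for
# illustration): an NCHW `sample` of shape (1, 4, 32, 32), an integer
# `timesteps`, and `encoder_hidden_states` of shape (1, 1, 1280) produce an
# output `sample` of the same (1, 4, 32, 32) NCHW shape.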
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : int = ArgumentParser('''Accelerate CLI tool''' ,usage='''accelerate <command> [<args>]''' ,allow_abbrev=__a )
_a : Optional[int] = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=__a )
env_command_parser(subparsers=__a )
launch_command_parser(subparsers=__a )
tpu_command_parser(subparsers=__a )
test_command_parser(subparsers=__a )
# Let's go
_a : Dict = parser.parse_args()
if not hasattr(__a ,'''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(__a )
if __name__ == "__main__":
main()
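# Typical invocations of the resulting CLI (the subcommands match the parsers
# registered above; the script argument is illustrative):
#
#   accelerate config              # interactive environment setup
#   accelerate env                 # print the active configuration
#   accelerate launch train.py     # run a script with the configured setup
#   accelerate test                # sanity-check the configuration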
| 15 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a__ = random.Random()
def __UpperCAmelCase ( __a : Tuple ,__a : str=1.0 ,__a : Optional[int]=None ,__a : List[Any]=None ) -> Any:
"""simple docstring"""
if rng is None:
_a : Dict = global_rng
_a : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _a , _a=7 , _a=4_0_0 , _a=2_0_0_0 , _a=2_0_4_8 , _a=1_2_8 , _a=1 , _a=5_1_2 , _a=3_0 , _a=4_4_1_0_0 , ) -> List[Any]:
_a : Optional[Any] = parent
_a : str = batch_size
_a : List[str] = min_seq_length
_a : str = max_seq_length
_a : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a : List[Any] = spectrogram_length
_a : List[str] = feature_size
_a : List[Any] = num_audio_channels
_a : Tuple = hop_length
_a : Optional[int] = chunk_length
_a : int = sampling_rate
def __lowercase ( self ) -> Union[str, Any]:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __lowercase ( self , _a=False , _a=False ) -> List[Any]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_a : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a : List[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a : str = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = TvltFeatureExtractor
def __lowercase ( self ) -> Dict:
_a : List[str] = TvltFeatureExtractionTester(self )
def __lowercase ( self ) -> Any:
_a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_a , '''feature_size''' ) )
self.assertTrue(hasattr(_a , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_a , '''hop_length''' ) )
self.assertTrue(hasattr(_a , '''chunk_length''' ) )
self.assertTrue(hasattr(_a , '''sampling_rate''' ) )
def __lowercase ( self ) -> Optional[int]:
_a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_a : Dict = self.feature_extraction_class.from_pretrained(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Union[str, Any] = feat_extract_second.to_dict()
_a : Any = dict_first.pop('''mel_filters''' )
_a : int = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Optional[int]:
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = os.path.join(_a , '''feat_extract.json''' )
feat_extract_first.to_json_file(_a )
_a : List[str] = self.feature_extraction_class.from_json_file(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Dict = feat_extract_second.to_dict()
_a : str = dict_first.pop('''mel_filters''' )
_a : str = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
# Initialize feature_extractor
_a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_a : Union[str, Any] = feature_extractor(
_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_a : int = np.asarray(_a )
_a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __lowercase ( self ) -> int:
_a : Union[str, Any] = self._load_datasamples(1 )
_a : int = TvltFeatureExtractor()
_a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
_a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 15 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = "time_series_transformer"
UpperCAmelCase__ : Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , _a = None , _a = None , _a = "student_t" , _a = "nll" , _a = 1 , _a = [1, 2, 3, 4, 5, 6, 7] , _a = "mean" , _a = 0 , _a = 0 , _a = 0 , _a = 0 , _a = None , _a = None , _a = 3_2 , _a = 3_2 , _a = 2 , _a = 2 , _a = 2 , _a = 2 , _a = True , _a = "gelu" , _a = 6_4 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 1_0_0 , _a = 0.02 , _a=True , **_a , ) -> Optional[int]:
# time series specific configuration
_a : Optional[Any] = prediction_length
_a : Dict = context_length or prediction_length
_a : Optional[Any] = distribution_output
_a : Dict = loss
_a : Dict = input_size
_a : Dict = num_time_features
_a : Any = lags_sequence
_a : Optional[Any] = scaling
_a : Any = num_dynamic_real_features
_a : Any = num_static_real_features
_a : Optional[int] = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
_a : List[Any] = cardinality
else:
_a : Optional[int] = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
_a : str = embedding_dimension
else:
_a : Optional[int] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
_a : Union[str, Any] = num_parallel_samples
# Transformer architecture configuration
_a : Any = input_size * len(_a ) + self._number_of_features
_a : int = d_model
_a : int = encoder_attention_heads
_a : Dict = decoder_attention_heads
_a : Optional[int] = encoder_ffn_dim
_a : Union[str, Any] = decoder_ffn_dim
_a : Dict = encoder_layers
_a : Tuple = decoder_layers
_a : List[Any] = dropout
_a : Tuple = attention_dropout
_a : Any = activation_dropout
_a : int = encoder_layerdrop
_a : int = decoder_layerdrop
_a : Tuple = activation_function
_a : Optional[Any] = init_std
_a : int = use_cache
super().__init__(is_encoder_decoder=_a , **_a )
@property
def __lowercase ( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
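# Minimal configuration sketch (hyperparameter values are illustrative; the
# upstream class name is TimeSeriesTransformerConfig):
#
#   config = TimeSeriesTransformerConfig(
#       prediction_length=24,                # forecast horizon
#       context_length=48,                   # encoder context window
#       num_static_categorical_features=1,
#       cardinality=[366],                   # must match the feature count above
#   )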
| 15 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
a__ = logging.get_logger(__name__)
@add_end_docstrings(
__lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self , _a ) -> np.ndarray:
if self.framework == "tf":
_a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __lowercase ( self , _a ) -> np.ndarray:
_a : int = self.get_masked_index(_a )
_a : Tuple = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def __lowercase ( self , _a ) -> Optional[int]:
if isinstance(_a , _a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_a )
def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]:
if return_tensors is None:
_a : Union[str, Any] = self.framework
_a : str = self.tokenizer(_a , return_tensors=_a )
self.ensure_exactly_one_mask_token(_a )
return model_inputs
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[str] = self.model(**_a )
_a : Any = model_inputs['''input_ids''']
return model_outputs
def __lowercase ( self , _a , _a=5 , _a=None ) -> str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
_a : List[Any] = target_ids.shape[0]
_a : Any = model_outputs['''input_ids'''][0]
_a : List[str] = model_outputs['''logits''']
if self.framework == "tf":
_a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_a : List[str] = outputs.numpy()
_a : Dict = outputs[0, masked_index, :]
_a : str = stable_softmax(_a , axis=-1 )
if target_ids is not None:
_a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) )
_a : Union[str, Any] = tf.expand_dims(_a , 0 )
_a : Optional[int] = tf.math.top_k(_a , k=_a )
_a , _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy()
else:
_a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_a : List[str] = outputs[0, masked_index, :]
_a : List[Any] = logits.softmax(dim=-1 )
if target_ids is not None:
_a : List[Any] = probs[..., target_ids]
_a , _a : Optional[Any] = probs.topk(_a )
_a : Dict = []
_a : List[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
_a : Optional[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
_a : Optional[int] = input_ids.numpy().copy()
if target_ids is not None:
_a : Tuple = target_ids[p].tolist()
_a : List[str] = p
# Filter padding out:
_a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a )
_a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_a )
result.append(_a )
if single_mask:
return result[0]
return result
def __lowercase ( self , _a , _a=None ) -> Dict:
if isinstance(_a , _a ):
_a : Tuple = [targets]
try:
_a : int = self.tokenizer.get_vocab()
except Exception:
_a : Any = {}
_a : List[Any] = []
for target in targets:
_a : List[Any] = vocab.get(_a , _a )
if id_ is None:
_a : Tuple = self.tokenizer(
_a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )['''input_ids''']
if len(_a ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
_a : Tuple = input_ids[0]
                # XXX: if users hit this code path, tokenization becomes pretty
                # slow, so emit a warning that lets them fix the input and get
                # faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
_a : List[str] = list(set(_a ) )
if len(_a ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
_a : int = np.array(_a )
return target_ids
def __lowercase ( self , _a=None , _a=None ) -> Tuple:
_a : str = {}
if targets is not None:
_a : List[Any] = self.get_target_ids(_a , _a )
_a : Optional[Any] = target_ids
if top_k is not None:
_a : Union[str, Any] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , _a , *_a , **_a ) -> int:
_a : Optional[Any] = super().__call__(_a , **_a )
if isinstance(_a , _a ) and len(_a ) == 1:
return outputs[0]
return outputs
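# Usage sketch via the standard `pipeline` factory (the checkpoint is one
# example of a fill-mask model; its mask token is `<mask>`):
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilroberta-base")
#   unmasker("Paris is the <mask> of France.", top_k=3, targets=["capital"])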
| 15 | 1 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
a__ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
a__ = logging.getLogger()
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_a : Any = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_a : Dict = parser.parse_args()
return args.f
def __UpperCAmelCase ( __a : Optional[int] ,__a : List[str]="eval" ) -> Any:
"""simple docstring"""
_a : Any = os.path.join(__a ,F"""{split}_results.json""" )
if os.path.exists(__a ):
with open(__a ,'''r''' ) as f:
return json.load(__a )
raise ValueError(F"""can't find {path}""" )
a__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
_a : Any = self.get_auto_remove_tmp_dir()
_a : Optional[Any] = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_glue.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __lowercase ( self ) -> Dict:
_a : Tuple = self.get_auto_remove_tmp_dir()
_a : Tuple = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_clm_flax.main()
_a : List[str] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 1_0_0 )
@slow
def __lowercase ( self ) -> Optional[int]:
_a : str = self.get_auto_remove_tmp_dir()
_a : Optional[int] = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(_a , '''argv''' , _a ):
run_summarization_flax.main()
_a : Optional[int] = get_results(_a , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __lowercase ( self ) -> Tuple:
_a : List[str] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 4_2 )
@slow
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = self.get_auto_remove_tmp_dir()
_a : int = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_ta_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __lowercase ( self ) -> Optional[Any]:
        # with so little data, distributed training needs more epochs to reach a score on par with 0/1-GPU runs
_a : Any = 7 if get_gpu_count() > 1 else 2
_a : List[Any] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_ner.main()
_a : Dict = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __lowercase ( self ) -> Any:
_a : Optional[int] = self.get_auto_remove_tmp_dir()
_a : Union[str, Any] = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_qa.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
| 15 | 1 |
# Size of the alphabet (number of possible byte values)
a__ = 256
# Modulus to hash a string
a__ = 1000003
def __UpperCAmelCase ( __a : str ,__a : str ) -> bool:
"""simple docstring"""
_a : str = len(__a )
_a : List[str] = len(__a )
if p_len > t_len:
return False
_a : Optional[Any] = 0
_a : List[Any] = 0
_a : Optional[int] = 1
# Calculating the hash of pattern and substring of text
for i in range(__a ):
_a : int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_a : List[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_a : int = (modulus_power * alphabet_size) % modulus
for i in range(0 ,t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Update the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash):
        # drop the leading character's contribution (text[i] * alphabet_size**(p_len - 1)),
        # shift one base to the left, and append the next character.
_a : List[str] = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
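# Usage sketch: rabin_karp("abc", "zzabczz") returns True and
# rabin_karp("abd", "zzabczz") returns False, in expected O(len(text)) time;
# hash collisions fall back to the explicit string comparison above.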
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_a : List[str] = '''abc1abc12'''
_a : Union[str, Any] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
_a : Optional[Any] = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(__a ,__a ) and not rabin_karp(__a ,__a )
# Test 2)
_a : str = '''ABABX'''
_a : List[str] = '''ABABZABABYABABX'''
assert rabin_karp(__a ,__a )
# Test 3)
_a : Dict = '''AAAB'''
_a : List[Any] = '''ABAAAAAB'''
assert rabin_karp(__a ,__a )
# Test 4)
_a : Optional[Any] = '''abcdabcy'''
_a : str = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(__a ,__a )
# Test 5)
_a : Union[str, Any] = '''Lü'''
_a : Any = '''Lüsai'''
assert rabin_karp(__a ,__a )
_a : Tuple = '''Lue'''
assert not rabin_karp(__a ,__a )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 15 |
import argparse
import os
import re
import packaging.version
a__ = '''examples/'''
a__ = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
a__ = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
a__ = '''README.md'''
def __UpperCAmelCase ( __a : List[str] ,__a : int ,__a : Optional[Any] ) -> int:
"""simple docstring"""
with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Tuple = f.read()
_a , _a : str = REPLACE_PATTERNS[pattern]
_a : List[str] = replace.replace('''VERSION''' ,__a )
_a : List[Any] = re_pattern.sub(__a ,__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.write(__a )
def __UpperCAmelCase ( __a : Any ) -> List[Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(__a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__a ,__a ) ,__a ,pattern='''examples''' )
def __UpperCAmelCase ( __a : List[Any] ,__a : List[str]=False ) -> int:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__a ,__a ,__a )
if not patch:
update_version_in_examples(__a )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
_a : Optional[Any] = '''🤗 Transformers currently provides the following architectures'''
_a : str = '''1. Want to contribute a new model?'''
with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Optional[int] = f.readlines()
# Find the start of the list.
_a : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_a : List[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
_a : Tuple = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' ,'''https://huggingface.co/docs/transformers/model_doc''' ,)
index += 1
with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.writelines(__a )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
with open(REPLACE_FILES['''init'''] ,'''r''' ) as f:
_a : Optional[Any] = f.read()
_a : Optional[Any] = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0]
return packaging.version.parse(__a )
def __UpperCAmelCase ( __a : Dict=False ) -> str:
"""simple docstring"""
_a : Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
_a : List[Any] = default_version.base_version
elif patch:
_a : str = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_a : List[str] = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_a : Dict = input(F"""Which version are you releasing? [{default_version}]""" )
if len(__a ) == 0:
_a : int = default_version
print(F"""Updating version to {version}.""" )
global_version_update(__a ,patch=__a )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
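# Small standalone self-check (my own helper): the branching above leans on
# these two packaging.version facts.
def _version_parse_demo() -> None:
    v = packaging.version.parse("4.26.0.dev0")
    assert v.is_devrelease
    assert v.base_version == "4.26.0"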
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
_a : str = get_version()
_a : int = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_a : List[Any] = current_version.base_version
# Check with the user we got that right.
_a : Union[str, Any] = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(__a ) == 0:
_a : List[str] = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(__a )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
a__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 15 | 1 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase_ :
"""simple docstring"""
@staticmethod
def __lowercase ( *_a , **_a ) -> Tuple:
pass
def __UpperCAmelCase ( __a : Any ) -> Dict:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
a__ = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __lowercase ( self , _a , _a , _a ) -> Union[str, Any]:
_a : Union[str, Any] = pipeline(
'''document-question-answering''' , model=_a , tokenizer=_a , image_processor=_a )
_a : str = INVOICE_URL
_a : List[Any] = list(zip(*apply_tesseract(load_image(_a ) , _a , '''''' ) ) )
_a : int = '''What is the placebo?'''
_a : Tuple = [
{
'''image''': load_image(_a ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def __lowercase ( self , _a , _a ) -> List[str]:
_a : Any = dqa_pipeline(_a , top_k=2 )
self.assertEqual(
_a , [
[
{'''score''': ANY(_a ), '''answer''': ANY(_a ), '''start''': ANY(_a ), '''end''': ANY(_a )},
{'''score''': ANY(_a ), '''answer''': ANY(_a ), '''start''': ANY(_a ), '''end''': ANY(_a )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def __lowercase ( self ) -> Dict:
_a : Dict = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
_a : List[str] = INVOICE_URL
_a : Optional[Any] = '''How many cats are there?'''
_a : Tuple = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 3_8, '''end''': 3_9},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 3_8, '''end''': 4_0},
]
_a : Optional[int] = dqa_pipeline(image=_a , question=_a , top_k=2 )
self.assertEqual(nested_simplify(_a , decimals=4 ) , _a )
_a : List[Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(_a , decimals=4 ) , _a )
        # No text is detected in this image, so layoutlmv2 should fail.
        # It will most likely return an empty answer.
_a : List[str] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_a : Any = dqa_pipeline(image=_a , question=_a , top_k=2 )
self.assertEqual(_a , [] )
        # We can optionally pass the words and bounding boxes directly
_a : Union[str, Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_a : int = []
_a : int = []
_a : Union[str, Any] = dqa_pipeline(image=_a , question=_a , words=_a , boxes=_a , top_k=2 )
self.assertEqual(_a , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __lowercase ( self ) -> Union[str, Any]:
_a : str = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
_a : Optional[Any] = INVOICE_URL
_a : Any = '''What is the invoice number?'''
_a : int = dqa_pipeline(image=_a , question=_a , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
_a : Optional[int] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
_a : Dict = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __lowercase ( self ) -> Dict:
_a : List[str] = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=5_0 , )
_a : Union[str, Any] = INVOICE_URL
_a : Optional[Any] = '''What is the invoice number?'''
_a : str = dqa_pipeline(image=_a , question=_a , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
_a : Optional[int] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
_a : List[str] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __lowercase ( self ) -> str:
_a : Any = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=_a )
_a : Optional[int] = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=_a , revision='''3dc6de3''' , )
_a : List[str] = INVOICE_URL
_a : Optional[Any] = '''What is the invoice number?'''
_a : List[Any] = dqa_pipeline(image=_a , question=_a , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
] , )
_a : List[str] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
] , )
_a : Any = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
]
]
* 2 , )
_a : List[str] = list(zip(*apply_tesseract(load_image(_a ) , _a , '''''' ) ) )
# This model should also work if `image` is set to None
_a : List[str] = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=_a )
_a : List[Any] = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=_a , revision='''3dc6de3''' , max_seq_len=5_0 , )
_a : Union[str, Any] = INVOICE_URL
_a : int = '''What is the invoice number?'''
_a : Any = dqa_pipeline(image=_a , question=_a , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
_a : Union[str, Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
]
]
* 2 , )
_a : str = list(zip(*apply_tesseract(load_image(_a ) , _a , '''''' ) ) )
# This model should also work if `image` is set to None
_a : Tuple = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
@slow
@require_torch
def __lowercase ( self ) -> List[Any]:
_a : Any = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
_a : List[str] = INVOICE_URL
_a : Tuple = '''What is the invoice number?'''
_a : int = dqa_pipeline(image=_a , question=_a , top_k=2 )
self.assertEqual(nested_simplify(_a , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def __lowercase ( self ) -> Optional[Any]:
pass
| 15 |
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
    if n == 1 or not isinstance(__a ,int ):
return 0
elif n == 2:
return 1
else:
_a : Any = [0, 1]
for i in range(2 ,n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
_a : Any = 0
_a : Dict = 2
while digits < n:
index += 1
_a : Dict = len(str(fibonacci(__a ) ) )
return index
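# Closed-form cross-check (my own helper, not used by solution()): Binet's
# formula gives F(i) ~ phi**i / sqrt(5), so the first index whose Fibonacci
# number has n digits is the smallest i with i*log10(phi) - log10(5)/2 >= n - 1.
import math
def _digits_index_closed_form(n: int) -> int:
    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))
# _digits_index_closed_form(1000) == 4782, the classic answer for n = 1000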
def __UpperCAmelCase ( __a : int = 1_000 ) -> int:
"""simple docstring"""
return fibonacci_digits_index(__a )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 15 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self ) -> str:
_a : Optional[int] = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
_a : str = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
_a : Optional[Any] = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
_a : Dict = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_a : str = model(_a )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _a )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _a , atol=1e-3 ) )
@slow
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[Any] = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
_a : List[str] = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
_a : Optional[Any] = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
_a : Union[str, Any] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
_a : str = model(_a )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _a )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _a , atol=1e-3 ) )
| 15 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
a__ = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
a__ = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
a__ = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
        - for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
        - \'matthews_correlation\': Matthews correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __UpperCAmelCase ( __a : int ,__a : List[str] ) -> Optional[Any]:
"""simple docstring"""
return float((preds == labels).mean() )
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ,__a : List[str]="binary" ) -> Optional[int]:
"""simple docstring"""
_a : List[str] = simple_accuracy(__a ,__a )
_a : Any = float(fa_score(y_true=__a ,y_pred=__a ,average=__a ) )
return {
"accuracy": acc,
"f1": fa,
}
def __UpperCAmelCase ( __a : Optional[Any] ,__a : str ) -> List[Any]:
"""simple docstring"""
_a : Union[str, Any] = {}
for id_pred, label in zip(__a ,__a ):
_a : Optional[int] = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
_a : Optional[Any] = id_pred['''prediction''']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
_a : str = [(pred, label)]
_a , _a : Any = [], []
for question, preds_labels in question_map.items():
_a , _a : Any = zip(*__a )
_a : List[Any] = fa_score(y_true=__a ,y_pred=__a ,average='''macro''' )
fas.append(__a )
_a : List[str] = int(sum(pred == label for pred, label in preds_labels ) == len(__a ) )
ems.append(__a )
_a : List[str] = float(sum(__a ) / len(__a ) )
_a : str = sum(__a ) / len(__a )
_a : Optional[int] = float(fa_score(y_true=__a ,y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
def __lowercase ( self ) -> Any:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
def __lowercase ( self , _a , _a ) -> Optional[Any]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_a , _a )}
elif self.config_name == "cb":
return acc_and_fa(_a , _a , fa_avg='''macro''' )
elif self.config_name == "record":
_a : Any = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
_a : Any = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
return evaluate_record(_a , _a )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_a , _a )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_a , _a )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 15 | 1 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def __UpperCAmelCase ( __a : Tuple ) -> str:
"""simple docstring"""
return EnvironmentCommand()
def __UpperCAmelCase ( __a : Union[str, Any] ) -> str:
"""simple docstring"""
return EnvironmentCommand(args.accelerate_config_file )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
@staticmethod
def __lowercase ( _a ) -> int:
_a : str = parser.add_parser('''env''' )
download_parser.set_defaults(func=_a )
download_parser.add_argument(
'''--accelerate-config_file''' , default=_a , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=_a )
def __init__( self , _a , *_a ) -> None:
_a : Union[str, Any] = accelerate_config_file
def __lowercase ( self ) -> Optional[int]:
_a : Tuple = '''not installed'''
if is_safetensors_available():
import safetensors
_a : Dict = safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
_a : str = F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
_a : Optional[Any] = '''not installed'''
_a : List[Any] = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_a : Optional[int] = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_a ):
_a : Optional[int] = load_config_from_file(self._accelerate_config_file ).to_dict()
_a : Optional[int] = (
'''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(_a , _a )
else F"""\t{accelerate_config}"""
)
_a : Tuple = '''not installed'''
_a : Tuple = '''NA'''
if is_torch_available():
import torch
_a : str = torch.__version__
_a : int = torch.cuda.is_available()
_a : List[Any] = '''not installed'''
_a : Optional[Any] = '''NA'''
if is_tf_available():
import tensorflow as tf
_a : int = tf.__version__
try:
# deprecated in v2.1
_a : Dict = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_a : List[str] = bool(tf.config.list_physical_devices('''GPU''' ) )
_a : Dict = '''not installed'''
_a : Optional[Any] = '''not installed'''
_a : Optional[Any] = '''not installed'''
_a : List[Any] = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
_a : Optional[Any] = flax.__version__
_a : Union[str, Any] = jax.__version__
_a : Union[str, Any] = jaxlib.__version__
_a : Tuple = jax.lib.xla_bridge.get_backend().platform
_a : Tuple = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': F"""{safetensors_version}""",
'''Accelerate version''': F"""{accelerate_version}""",
'''Accelerate config''': F"""{accelerate_config_str}""",
'''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""",
'''Tensorflow version (GPU?)''': F"""{tf_version} ({tf_cuda_available})""",
'''Flax version (CPU?/GPU?/TPU?)''': F"""{flax_version} ({jax_backend})""",
'''Jax version''': F"""{jax_version}""",
'''JaxLib version''': F"""{jaxlib_version}""",
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_a ) )
return info
@staticmethod
def __lowercase ( _a ) -> Optional[int]:
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 15 |
import numpy as np
def __UpperCAmelCase ( __a : np.ndarray ,__a : np.ndarray ,__a : float = 1E-12 ,__a : int = 100 ,) -> tuple[float, np.ndarray]:
"""simple docstring"""
assert np.shape(__a )[0] == np.shape(__a )[1]
# Ensure proper dimensionality.
assert np.shape(__a )[0] == np.shape(__a )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(__a ) == np.iscomplexobj(__a )
_a : List[str] = np.iscomplexobj(__a )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(__a ,input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
_a : List[str] = False
_a : List[str] = 0
_a : Tuple = 0
_a : str = 1E12
while not convergence:
        # Multiply the matrix by the vector.
_a : str = np.dot(__a ,__a )
# Normalize the resulting output vector.
_a : List[Any] = w / np.linalg.norm(__a )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
_a : Dict = vector.conj().T if is_complex else vector.T
_a : Tuple = np.dot(__a ,np.dot(__a ,__a ) )
# Check convergence.
_a : List[str] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_a : Dict = True
_a : str = lambda_
if is_complex:
_a : Tuple = np.real(lambda_ )
return lambda_, vector
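# Tiny standalone sketch (my own numbers) of the loop above: repeated
# multiply-and-normalize on a 2x2 diagonal matrix drives the Rayleigh
# quotient to the dominant eigenvalue 2.
def _power_iteration_demo() -> float:
    a = np.array([[2.0, 0.0], [0.0, 1.0]])
    v = np.array([1.0, 1.0])
    for _ in range(50):
        v = np.dot(a ,v )
        v = v / np.linalg.norm(v )
    return float(np.dot(v.T ,np.dot(a ,v ) ) )  # Rayleigh quotient, ~2.0 here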
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_a : List[str] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_a : int = np.array([41, 4, 20] )
_a : Optional[Any] = real_input_matrix.astype(np.complexaaa )
_a : int = np.triu(1j * complex_input_matrix ,1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_a : Union[str, Any] = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_a : Optional[int] = real_input_matrix
_a : Union[str, Any] = real_vector
elif problem_type == "complex":
_a : str = complex_input_matrix
_a : str = complex_vector
# Our implementation.
_a , _a : Optional[Any] = power_iteration(__a ,__a )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh is used for symmetric or hermitian matrices).
_a , _a : List[str] = np.linalg.eigh(__a )
# Last eigenvalue is the maximum one.
_a : Tuple = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_a : List[Any] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
        # Take element-wise absolute values of each eigenvector,
        # as they are only unique up to a minus sign.
assert np.linalg.norm(np.abs(__a ) - np.abs(__a ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 15 | 1 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a__ = logging.get_logger(__name__)
a__ = {'''vocab_file''': '''spiece.model'''}
a__ = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
a__ = {
'''AI-Sweden/gpt-sw3-126m''': 2048,
'''AI-Sweden/gpt-sw3-350m''': 2048,
'''AI-Sweden/gpt-sw3-1.6b''': 2048,
'''AI-Sweden/gpt-sw3-6.7b''': 2048,
'''AI-Sweden/gpt-sw3-20b''': 2048,
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : str = VOCAB_FILES_NAMES
UpperCAmelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self , _a , _a=False , _a=False , _a=False , _a=None , _a=None , _a=None , _a=None , _a = None , **_a , ) -> None:
_a : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
_a : Union[str, Any] = kwargs.get('''name_or_path''' )
if name_or_path is None:
logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' if you are testing the model, this can safely be ignored''' )
_a : Union[str, Any] = '''None'''
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_a : Any = '''<|endoftext|>''' if eos_token is None else eos_token
_a : Any = '''<unk>''' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_a : Union[str, Any] = unk_token if pad_token is None else pad_token
_a : Tuple = eos_token if bos_token is None else bos_token
else:
_a : Optional[Any] = '''<pad>''' if pad_token is None else pad_token
_a : Union[str, Any] = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , pad_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
_a : Optional[Any] = do_lower_case
_a : Any = remove_space
_a : List[Any] = keep_accents
_a : Tuple = vocab_file
_a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
# Used for whitespace normalization in input texts
        # fmt: off
_a : Union[str, Any] = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_a : Any = re.compile(
F"""[{''.join(map(_a , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]""" )
def __getstate__( self ) -> List[str]:
_a : Optional[Any] = self.__dict__.copy()
_a : Any = None
return state
def __setstate__( self , _a ) -> Dict:
_a : int = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a : Tuple = {}
_a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __lowercase ( self ) -> int:
return len(self.sp_model )
def __lowercase ( self , _a ) -> str:
_a : Optional[int] = self.non_printing_characters_re.sub('''''' , _a )
# Normalize whitespaces
_a : Dict = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
# NFC Unicode normalization
_a : Optional[Any] = unicodedata.normalize('''NFC''' , _a )
return text
def __lowercase ( self , _a , **_a ) -> List[str]:
_a : Optional[Any] = self.preprocess_text(_a )
return self.sp_model.encode(_a , out_type=_a )
def __lowercase ( self , _a ) -> int:
return self.sp_model.PieceToId(_a )
def __lowercase ( self , _a ) -> str:
return self.sp_model.IdToPiece(_a )
@staticmethod
def __lowercase ( _a ) -> str:
return out_string
def __lowercase ( self , _a ) -> str:
_a : int = []
_a : Any = ''''''
_a : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
_a : Tuple = True
_a : List[str] = []
else:
current_sub_tokens.append(_a )
_a : Any = False
out_string += self.sp_model.decode(_a )
return out_string
def __lowercase ( self ) -> Dict[str, int]:
_a : List[str] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Any = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
_a : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def __lowercase ( self , _a , _a = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(_a , _a ):
_a : Tuple = self.preprocess_text(_a )
_a : int = self.sp_model.encode(_a )
else:
_a : Dict = [self.preprocess_text(_a ) for t in text]
_a : Optional[int] = self.sp_model.encode(_a )
if return_tensors is True or return_tensors == "pt":
_a : List[str] = torch.tensor(_a )
return token_ids
def __lowercase ( self , _a ) -> str:
return self.sp_model.decode(_a )
def __lowercase ( self , _a ) -> List[int]:
_a : List[Any] = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
_a : Optional[int] = (
F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(_a ) + F"""{self.bos_token}Bot:"""
)
return self.encode(text=_a )
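# Shape sketch (placeholder turns, my own helper) of the prompt string the
# conversation encoding above builds before tokenization: the whole history
# joined by BOS tokens, with a trailing "Bot:" cue for generation.
def _chat_prompt_demo(eos: str = "<|endoftext|>", bos: str = "<s>") -> str:
    turns = ["User: Hi", "Bot: Hello"]  # "User:"/"Bot:" prefixes as above
    return f"{eos}{bos}" + bos.join(turns) + f"{bos}Bot:"
# -> "<|endoftext|><s>User: Hi<s>Bot: Hello<s>Bot:"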
| 15 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCAmelCase__ : Optional[datasets.Features] = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCAmelCase__ : Any = PandasConfig
def __lowercase ( self ) -> Any:
return datasets.DatasetInfo(features=self.config.features )
def __lowercase ( self , _a ) -> List[Any]:
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_a : str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_a , (str, list, tuple) ):
_a : Dict = data_files
if isinstance(_a , _a ):
_a : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : int = [dl_manager.iter_files(_a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
_a : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(_a , _a ):
_a : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : Any = [dl_manager.iter_files(_a ) for file in files]
splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={'''files''': files} ) )
return splits
def __lowercase ( self , _a ) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_a : Optional[Any] = table_cast(_a , self.config.features.arrow_schema )
return pa_table
def __lowercase ( self , _a ) -> List[str]:
for i, file in enumerate(itertools.chain.from_iterable(_a ) ):
with open(_a , '''rb''' ) as f:
_a : str = pa.Table.from_pandas(pd.read_pickle(_a ) )
yield i, self._cast_table(_a )
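# Standalone sketch (toy frame, my own helper name) of the core conversion in
# the generator above: a pickled/pandas DataFrame becomes a pyarrow Table that
# the builder can then cast and yield.
def _pandas_to_arrow_demo() -> pa.Table:
    return pa.Table.from_pandas(pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}) )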
| 15 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = StableDiffusionLDMaDPipeline
UpperCAmelCase__ : List[Any] = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : str = TEXT_TO_IMAGE_IMAGE_PARAMS
def __lowercase ( self ) -> List[str]:
torch.manual_seed(0 )
_a : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
_a : Optional[Any] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
_a : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
_a : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_a : List[str] = CLIPTextModel(_a )
_a : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_a : Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowercase ( self , _a , _a=0 ) -> Optional[Any]:
if str(_a ).startswith('''mps''' ):
_a : Tuple = torch.manual_seed(_a )
else:
_a : str = torch.Generator(device=_a ).manual_seed(_a )
_a : Optional[int] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowercase ( self ) -> int:
_a : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a : Union[str, Any] = self.get_dummy_components()
_a : str = StableDiffusionLDMaDPipeline(**_a )
_a : int = ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
_a : Dict = self.get_dummy_inputs(_a )
_a : Any = ldmad_pipe(**_a )
_a , _a : Union[str, Any] = output.rgb, output.depth
_a : Tuple = rgb[0, -3:, -3:, -1]
_a : int = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
_a : int = np.array(
[0.3733_8176, 0.7_0247, 0.7420_3193, 0.5164_3604, 0.5825_6793, 0.6093_2136, 0.418_1095, 0.4835_5877, 0.4653_5262] )
_a : Any = np.array([103.4_6727, 85.81_2004, 87.84_9236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def __lowercase ( self ) -> List[str]:
_a : Optional[int] = self.get_dummy_components()
_a : Optional[Any] = StableDiffusionLDMaDPipeline(**_a )
_a : int = ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
_a : Union[str, Any] = self.get_dummy_inputs(_a )
_a : Union[str, Any] = 3 * [inputs['''prompt''']]
# forward
_a : List[Any] = ldmad_pipe(**_a )
_a , _a : str = output.rgb, output.depth
_a : str = rgb_slice_a[0, -3:, -3:, -1]
_a : Optional[Any] = depth_slice_a[0, -3:, -1]
_a : Any = self.get_dummy_inputs(_a )
_a : List[Any] = 3 * [inputs.pop('''prompt''' )]
_a : int = ldmad_pipe.tokenizer(
_a , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
_a : Tuple = text_inputs['''input_ids'''].to(_a )
_a : Union[str, Any] = ldmad_pipe.text_encoder(_a )[0]
_a : Optional[int] = prompt_embeds
# forward
_a : Dict = ldmad_pipe(**_a )
_a , _a : List[str] = output.rgb, output.depth
_a : Union[str, Any] = rgb_slice_a[0, -3:, -3:, -1]
_a : int = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a : List[str] = self.get_dummy_components()
_a : Any = PNDMScheduler(skip_prk_steps=_a )
_a : Any = StableDiffusionLDMaDPipeline(**_a )
_a : Any = ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
_a : Tuple = self.get_dummy_inputs(_a )
_a : int = '''french fries'''
_a : Tuple = ldmad_pipe(**_a , negative_prompt=_a )
_a , _a : Optional[Any] = output.rgb, output.depth
_a : List[Any] = rgb[0, -3:, -3:, -1]
_a : Tuple = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
_a : Any = np.array(
[0.3_7044, 0.7181_1503, 0.722_3251, 0.4860_3675, 0.563_8391, 0.636_4948, 0.4283_3704, 0.490_1315, 0.4792_6217] )
_a : int = np.array([107.8_4738, 84.6_2802, 89.96_2135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self , _a , _a="cpu" , _a=torch.floataa , _a=0 ) -> Tuple:
_a : int = torch.Generator(device=_a ).manual_seed(_a )
_a : Optional[int] = np.random.RandomState(_a ).standard_normal((1, 4, 6_4, 6_4) )
_a : Dict = torch.from_numpy(_a ).to(device=_a , dtype=_a )
_a : int = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __lowercase ( self ) -> Optional[Any]:
_a : Tuple = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' )
_a : Optional[Any] = ldmad_pipe.to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
_a : int = self.get_inputs(_a )
_a : Optional[int] = ldmad_pipe(**_a )
_a , _a : Any = output.rgb, output.depth
_a : Tuple = rgb[0, -3:, -3:, -1].flatten()
_a : int = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2)
_a : int = np.array(
[0.5380_5465, 0.5670_7305, 0.548_6515, 0.5701_2236, 0.581_4511, 0.5625_3487, 0.5484_3014, 0.5509_2263, 0.645_9706] )
_a : Dict = np.array(
[0.926_3781, 0.667_8672, 0.548_6515, 0.9220_2145, 0.6783_1135, 0.5625_3487, 0.924_1694, 0.755_1478, 0.645_9706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self , _a , _a="cpu" , _a=torch.floataa , _a=0 ) -> Optional[int]:
_a : Optional[Any] = torch.Generator(device=_a ).manual_seed(_a )
_a : Dict = np.random.RandomState(_a ).standard_normal((1, 4, 6_4, 6_4) )
_a : Optional[Any] = torch.from_numpy(_a ).to(device=_a , dtype=_a )
_a : str = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 5_0,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def __lowercase ( self ) -> Tuple:
_a : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
_a : Any = self.get_inputs(_a )
_a : List[str] = ldmad_pipe(**_a )
_a , _a : Any = output.rgb, output.depth
_a : str = 0.49_5586
_a : Union[str, Any] = 0.3379_5515
_a : Any = 112.4_8518
_a : List[Any] = 98.48_9746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def __lowercase ( self ) -> int:
_a : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(_a )
ldmad_pipe.set_progress_bar_config(disable=_a )
_a : List[str] = self.get_inputs(_a )
_a : List[Any] = ldmad_pipe(**_a )
_a , _a : Union[str, Any] = output.rgb, output.depth
_a : int = 0.419_4127
_a : str = 0.3537_5586
_a : str = 0.563_8502
_a : Tuple = 0.3468_6103
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 15 |
def __UpperCAmelCase ( __a : int ,__a : int ,__a : int ) -> int:
"""simple docstring"""
if exponent == 1:
return base
if exponent % 2 == 0:
_a : List[Any] = _modexpt(__a ,exponent // 2 ,__a ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(__a ,exponent - 1 ,__a )) % modulo_value
def __UpperCAmelCase ( __a : int = 1_777 ,__a : int = 1_855 ,__a : int = 8 ) -> int:
"""simple docstring"""
_a : List[Any] = base
for _ in range(1 ,__a ):
_a : Any = _modexpt(__a ,__a ,10**digits )
return result
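# Illustrative sketch (my own toy numbers): the loop above evaluates the power
# tower base^base^...^base modulo 10**digits one level at a time; a height-2
# tower is a single modular exponentiation.
def _tower_height2_demo(base: int = 7, digits: int = 3) -> int:
    return pow(base, base, 10**digits)  # 7**7 = 823543 -> last 3 digits: 543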
if __name__ == "__main__":
print(f'''{solution() = }''')
| 15 | 1 |
def __UpperCAmelCase ( __a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
_a : List[Any] = [0] * len(__a )
_a : int = []
_a : int = [1] * len(__a )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__a ) ):
if indegree[i] == 0:
            queue.append(i )
while queue:
_a : Optional[int] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
_a : str = long_dist[vertex] + 1
if indegree[x] == 0:
                queue.append(x )
print(max(__a ) )
# Adjacency list of Graph
a__ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
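# Quick standalone cross-check (my own helper): a brute-force DFS over the
# same DAG also finds a longest chain of 5 vertices, e.g. 0 -> 2 -> 5 -> 6 -> 7.
def _longest_chain(g: dict, v: int) -> int:
    return 1 + max((_longest_chain(g, w) for w in g[v]), default=0)
# max(_longest_chain(graph, v) for v in graph) == 5, matching the printed value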
| 15 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
a__ = '''\
'''
a__ = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
a__ = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
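# Numeric sketch (toy values, my own helper): perplexity is the exponential of
# the mean per-token negative log-likelihood, which the loop below computes
# batch-wise with an attention-masked mean.
def _perplexity_demo() -> float:
    nlls = np.array([2.1, 1.7, 3.0])  # per-token NLLs in nats
    return float(np.exp(nlls.mean() ) )  # exp(2.2667) ~ 9.65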
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def __lowercase ( self , _a , _a , _a = 1_6 , _a = True , _a=None ) -> List[Any]:
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either cpu, cuda or gpu."
if device == "gpu":
_a : List[str] = '''cuda'''
else:
_a : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
_a : Dict = AutoModelForCausalLM.from_pretrained(_a )
_a : List[Any] = model.to(_a )
_a : List[str] = AutoTokenizer.from_pretrained(_a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_a : str = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_a : List[Any] = model.config.max_length - 1
else:
_a : List[str] = model.config.max_length
_a : Union[str, Any] = tokenizer(
_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a )
_a : List[Any] = encodings['''input_ids''']
_a : int = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_a : Optional[int] = []
_a : Dict = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ):
_a : Dict = min(start_index + batch_size , len(_a ) )
_a : Union[str, Any] = encoded_texts[start_index:end_index]
_a : int = attn_masks[start_index:end_index]
if add_start_token:
_a : Dict = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a )
_a : List[str] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
_a : Dict = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 )
_a : Dict = encoded_batch
with torch.no_grad():
_a : Any = model(_a , attention_mask=_a ).logits
_a : List[str] = out_logits[..., :-1, :].contiguous()
_a : Union[str, Any] = labels[..., 1:].contiguous()
_a : Optional[int] = attn_mask[..., 1:].contiguous()
_a : Union[str, Any] = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
| 15 | 1 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
debug_launcher(test_script.main )
def __lowercase ( self ) -> List[str]:
debug_launcher(test_ops.main )
| 15 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 15 | 1 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
a__ = get_logger(__name__)
class UpperCAmelCase_ :
"""simple docstring"""
UpperCAmelCase__ : List[str] = "dummy_data"
UpperCAmelCase__ : Dict = "datasets"
UpperCAmelCase__ : Any = False
def __init__( self , _a , _a , _a , _a = None , _a = False , _a = True , _a = None , ) -> Optional[Any]:
_a : Optional[int] = 0
_a : int = dataset_name
_a : Optional[int] = cache_dir
_a : Optional[Any] = use_local_dummy_data
_a : List[str] = config
# download_callbacks take a single url as input
_a : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_a : int = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_a : Optional[int] = str(_a )
# to be downloaded
_a : str = None
_a : int = None
@property
def __lowercase ( self ) -> Dict:
if self._dummy_file is None:
_a : Any = self.download_dummy_data()
return self._dummy_file
@property
def __lowercase ( self ) -> Optional[Any]:
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def __lowercase ( self ) -> Any:
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def __lowercase ( self ) -> Tuple:
_a : Tuple = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_a : Dict = cached_path(
_a , cache_dir=self.cache_dir , extract_compressed_file=_a , force_extract=_a )
return os.path.join(_a , self.dummy_file_name )
@property
def __lowercase ( self ) -> int:
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __lowercase ( self ) -> Dict:
if self._bucket_url is None:
_a : Dict = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def __lowercase ( self ) -> Union[str, Any]:
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def __lowercase ( self , _a , *_a ) -> str:
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_a : str = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_a : Optional[int] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_a , _a ):
return self.create_dummy_data_dict(_a , _a )
elif isinstance(_a , (list, tuple) ):
return self.create_dummy_data_list(_a , _a )
else:
return self.create_dummy_data_single(_a , _a )
def __lowercase ( self , _a , *_a ) -> Optional[Any]:
return self.download_and_extract(_a )
def __lowercase ( self , _a , _a ) -> List[str]:
return self.download_and_extract(_a )
def __lowercase ( self , _a , *_a , **_a ) -> List[Any]:
return path
def __lowercase ( self ) -> str:
return {}
def __lowercase ( self , _a , _a ) -> str:
_a : Dict = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_a , _a ):
for single_url in single_urls:
download_callback(_a )
else:
_a : Dict = single_urls
download_callback(_a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_a , _a ):
_a : Optional[int] = [os.path.join(_a , urllib.parse.quote_plus(Path(_a ).name ) ) for x in single_urls]
else:
_a : Tuple = single_urls
_a : Optional[int] = os.path.join(_a , urllib.parse.quote_plus(Path(_a ).name ) )
_a : Union[str, Any] = value
# make sure that values are unique
if all(isinstance(_a , _a ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_a : Any = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __lowercase ( self , _a , _a ) -> List[str]:
_a : int = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_a : Any = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , _a ) ) for url in data_url )
_a : List[str] = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_a : Any = [data_url[0]] * len(_a )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_a : Optional[int] = os.path.join(_a , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(_a )
return dummy_data_list
def __lowercase ( self , _a , _a ) -> Dict:
for download_callback in self.download_callbacks:
download_callback(_a )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_a : int = os.path.join(_a , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(_a ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __lowercase ( self ) -> List[Any]:
pass
def __lowercase ( self ) -> Dict:
pass
def __lowercase ( self , _a ) -> List[Any]:
def _iter_archive_members(_a ):
# this preserves the order of the members inside the ZIP archive
_a : Optional[Any] = Path(self.dummy_file ).parent
_a : Tuple = path.relative_to(_a )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_a : List[Any] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_a )
_a : List[Any] = Path(_a )
_a : int = _iter_archive_members(_a ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(_a ).as_posix(), file_path.open('''rb''' )
def __lowercase ( self , _a ) -> List[Any]:
if not isinstance(_a , _a ):
_a : str = [paths]
for path in paths:
if os.path.isfile(_a ):
if os.path.basename(_a ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_a ):
if os.path.basename(_a ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(_a ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(_a , _a )
| 15 |
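# --- Illustrative sketch (not part of the dataset row above) ---
# How the dummy-data manager above maps download URLs onto files inside the
# dummy_data folder: each URL is reduced to its last path component, made
# filesystem-safe with urllib.parse.quote_plus. The URLs and version folder
# below are hypothetical.
import os
import urllib.parse

dummy_root = os.path.join("dummy", "1.0.0", "dummy_data")
urls = [
    "https://example.com/data/train.csv",
    "https://example.com/download?file=test.csv",
]
for url in urls:
    local_name = urllib.parse.quote_plus(url.split("/")[-1])
    print(os.path.join(dummy_root, local_name))
# -> dummy/1.0.0/dummy_data/train.csv
# -> dummy/1.0.0/dummy_data/download%3Ffile%3Dtest.csv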
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
a__ = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
a__ = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
a__ = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
a__ = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
a__ = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
a__ = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
a__ = ''''''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : List[str] ) -> Optional[int]:
"""simple docstring"""
assert ReadMe.from_string(__a ,__a ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def __UpperCAmelCase ( __a : List[str] ,__a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ):
_a : List[Any] = ReadMe.from_string(__a ,__a )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Dict ,__a : Dict ) -> Tuple:
"""simple docstring"""
with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ):
ReadMe.from_string(__a ,__a )
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
ReadMe.from_string(__a ,__a ,suppress_parsing_errors=__a )
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Any ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : Tuple = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : Optional[Any] = ReadMe.from_readme(__a ,__a ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def __UpperCAmelCase ( __a : List[Any] ,__a : List[Any] ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : int = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : Optional[int] = expected_error.format(path=__a )
with pytest.raises(__a ,match=re.escape(__a ) ):
_a : Any = ReadMe.from_readme(__a ,__a )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : str ,__a : Union[str, Any] ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : Optional[Any] = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : str = expected_error.format(path=__a )
with pytest.raises(__a ,match=re.escape(__a ) ):
ReadMe.from_readme(__a ,__a )
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Optional[Any] ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : int = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
ReadMe.from_readme(__a ,__a ,suppress_parsing_errors=__a )
| 15 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "sew-d"
def __init__( self , _a=3_2 , _a=7_6_8 , _a=1_2 , _a=1_2 , _a=3_0_7_2 , _a=2 , _a=5_1_2 , _a=2_5_6 , _a=True , _a=True , _a=("p2c", "c2p") , _a="layer_norm" , _a="gelu_python" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=0.02 , _a=1e-7 , _a=1e-5 , _a="group" , _a="gelu" , _a=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _a=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _a=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _a=False , _a=1_2_8 , _a=1_6 , _a=True , _a=0.05 , _a=1_0 , _a=2 , _a=0.0 , _a=1_0 , _a=0 , _a="mean" , _a=False , _a=False , _a=2_5_6 , _a=0 , _a=1 , _a=2 , **_a , ) -> int:
super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a )
_a : Optional[Any] = hidden_size
_a : List[str] = feat_extract_norm
_a : List[str] = feat_extract_activation
_a : Tuple = list(_a )
_a : List[Any] = list(_a )
_a : List[str] = list(_a )
_a : Tuple = conv_bias
_a : str = num_conv_pos_embeddings
_a : int = num_conv_pos_embedding_groups
_a : List[Any] = len(self.conv_dim )
_a : Optional[int] = num_hidden_layers
_a : Optional[int] = intermediate_size
_a : int = squeeze_factor
_a : Optional[Any] = max_position_embeddings
_a : Any = position_buckets
_a : List[str] = share_att_key
_a : Optional[int] = relative_attention
_a : str = norm_rel_ebd
_a : List[Any] = list(_a )
_a : Optional[int] = hidden_act
_a : int = num_attention_heads
_a : str = hidden_dropout
_a : List[Any] = attention_dropout
_a : int = activation_dropout
_a : int = feat_proj_dropout
_a : Any = final_dropout
_a : Any = layer_norm_eps
_a : int = feature_layer_norm_eps
_a : Optional[int] = initializer_range
_a : Tuple = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a : Optional[Any] = apply_spec_augment
_a : Optional[int] = mask_time_prob
_a : Tuple = mask_time_length
_a : str = mask_time_min_masks
_a : Union[str, Any] = mask_feature_prob
_a : Any = mask_feature_length
_a : Optional[int] = mask_feature_min_masks
# ctc loss
_a : Tuple = ctc_loss_reduction
_a : Optional[int] = ctc_zero_infinity
# sequence classification
_a : Optional[Any] = use_weighted_layer_sum
_a : Any = classifier_proj_size
@property
def __lowercase ( self ) -> List[str]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 15 |
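# --- Illustrative sketch (not part of the dataset row above) ---
# The final property above multiplies all convolutional strides together, giving
# the overall waveform-to-frame downsampling factor of the feature extractor.
# With the default strides from the config above, 16 kHz audio is reduced by a
# factor of 320, i.e. one frame every 20 ms.
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)  # defaults from the config above
factor = functools.reduce(operator.mul, conv_stride, 1)
print(factor)            # 320
print(16_000 / factor)   # 50.0 frames per second -> 20 ms per frame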
from __future__ import annotations
def __UpperCAmelCase ( __a : list ) -> float:
"""simple docstring"""
if not nums:
raise ValueError('''List is empty''' )
return sum(__a ) / len(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 1 |
a__ = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def __UpperCAmelCase ( __a : float ) -> str:
"""simple docstring"""
assert type(__a ) in (int, float) and decimal == int(__a )
_a : List[Any] = int(__a )
_a : int = ''''''
_a : Dict = False
if decimal < 0:
_a : Any = True
decimal *= -1
while decimal > 0:
_a , _a : Union[str, Any] = divmod(__a ,16 )
_a : Optional[Any] = values[remainder] + hexadecimal
_a : int = '''0x''' + hexadecimal
if negative:
_a : int = '''-''' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 |
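# --- Illustrative sketch (not part of the dataset row above) ---
# A worked trace of the repeated-divmod loop above for decimal 5973:
#   5973 = 373 * 16 + 5   -> digit '5'
#    373 =  23 * 16 + 5   -> digit '5'
#     23 =   1 * 16 + 7   -> digit '7'
#      1 =   0 * 16 + 1   -> digit '1'
# Reading the remainders bottom-up gives 0x1755; a leading '-' is prepended for
# negative inputs, matching the function above. The builtin hex() agrees:
assert hex(5973) == "0x1755"
assert hex(-26) == "-0x1a"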
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
a__ = ['''small''', '''medium''', '''large''']
a__ = '''lm_head.decoder.weight'''
a__ = '''lm_head.weight'''
def __UpperCAmelCase ( __a : str ,__a : str ) -> List[str]:
"""simple docstring"""
_a : Any = torch.load(__a )
_a : List[str] = d.pop(__a )
os.makedirs(__a ,exist_ok=__a )
torch.save(__a ,os.path.join(__a ,__a ) )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
a__ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
a__ = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
a__ = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 15 | 1 |
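# --- Illustrative sketch (not part of the dataset row above) ---
# What the conversion above does to a checkpoint, shown on a tiny hypothetical
# state dict: the DialoGPT-style 'lm_head.decoder.weight' entry is popped and
# re-inserted under the HF name 'lm_head.weight'; all other entries are kept.
import torch

state_dict = {
    "transformer.wte.weight": torch.zeros(4, 2),  # hypothetical weights
    "lm_head.decoder.weight": torch.zeros(4, 2),
}
state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
print(sorted(state_dict))  # ['lm_head.weight', 'transformer.wte.weight']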
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def __UpperCAmelCase ( __a : List[str] ) -> Tuple:
"""simple docstring"""
_a : Union[str, Any] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__a ,__a )
def __UpperCAmelCase ( __a : str ) -> List[Any]:
"""simple docstring"""
_a , _a : List[str] = emb.weight.shape
_a : Optional[Any] = nn.Linear(__a ,__a ,bias=__a )
_a : Optional[Any] = emb.weight.data
return lin_layer
def __UpperCAmelCase ( __a : Any ,__a : str=None ) -> int:
"""simple docstring"""
_a : List[Any] = {}
for old_key in state_dict.keys():
_a : Dict = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
_a : Optional[Any] = key.replace('''moe_layer.experts.0''' ,F"""ffn.experts.expert_{expert_idx}""" )
else:
_a : Tuple = key.replace('''moe_layer.experts.''' ,'''ffn.experts.expert_''' )
if "gate" in key:
_a : Tuple = key.replace('''.moe_layer.gate.wg''' ,'''.ffn.router.classifier''' )
if "fc2" and "experts" not in key:
_a : Optional[Any] = key.replace('''.fc2.''' ,'''.ffn.fc2.''' )
if "fc1" and "experts" not in key:
_a : str = key.replace('''.fc1.''' ,'''.ffn.fc1.''' )
if ".encoder_attn." in key:
_a : str = key.replace('''.encoder_attn.''' ,'''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
_a : Dict = key.replace('''encoder_attn_layer_norm''' ,'''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
_a : int = key.replace('''final_layer_norm''' ,'''ff_layer_norm''' )
_a : Any = state_dict[old_key]
return new_dict
def __UpperCAmelCase ( __a : Dict ,__a : Optional[Any] ,__a : str ,__a : Dict ,__a : str = WEIGHTS_NAME ) -> Optional[Any]:
"""simple docstring"""
_a : List[Any] = []
_a : str = 0
os.makedirs(__a ,exist_ok=__a )
for expert in range(__a ):
_a : Dict = switch_checkpoint_path + F"""-rank-{expert}.pt"""
if os.path.isfile(__a ):
_a : Any = torch.load(__a )['''model''']
remove_ignore_keys_(__a )
_a : Union[str, Any] = rename_fairseq_keys(__a ,__a )
_a : List[Any] = os.path.join(
__a ,weights_name.replace('''.bin''' ,F"""-{len(__a )+1:05d}-of-???.bin""" ) )
torch.save(__a ,__a )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__a )[0]].dtype )
# Add the last block
_a : Union[str, Any] = os.path.join(__a ,weights_name.replace('''.bin''' ,F"""-{len(__a )+1:05d}-of-???.bin""" ) )
_a : List[Any] = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(__a )
_a : Optional[Any] = rename_fairseq_keys(__a ,__a )
_a : Union[str, Any] = shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__a ) == 1:
_a : Union[str, Any] = os.path.join(__a ,__a )
torch.save(__a ,__a )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__a ,__a )
# Otherwise, let's build the index
_a : List[Any] = {}
for idx, shard in enumerate(__a ):
_a : Any = weights_name.replace('''.bin''' ,F"""-{idx+1:05d}-of-{len(__a ):05d}.bin""" )
_a : Union[str, Any] = os.path.join(__a ,weights_name.replace('''.bin''' ,F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(__a ,os.path.join(__a ,__a ) )
for key in shard:
_a : Dict = shard_file
# Add the metadata
_a : List[str] = {'''total_size''': total_size}
_a : int = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(__a ,__a ) ,'''w''' ,encoding='''utf-8''' ) as f:
_a : str = json.dumps(__a ,indent=2 ,sort_keys=__a ) + '''\n'''
f.write(__a )
return metadata, index
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
a__ = parser.parse_args()
a__ , a__ = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
a__ = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
a__ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path)
| 15 |
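# --- Illustrative sketch (not part of the dataset row above) ---
# The shape of the weights index JSON that shard_on_the_fly builds above: a
# 'metadata' block with the total parameter byte size plus a 'weight_map' that
# sends every parameter name to the shard file holding it. The parameter names
# and sizes below are hypothetical.
import json

index = {
    "metadata": {"total_size": 2_048},
    "weight_map": {
        "encoder.layers.0.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin",
        "decoder.embed_tokens.weight": "pytorch_model-00002-of-00002.bin",
    },
}
print(json.dumps(index, indent=2, sort_keys=True))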
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase_ ( enum.Enum ):
"""simple docstring"""
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : Optional[Any] = 2
@add_end_docstrings(__lowercase )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self , *_a , **_a ) -> List[str]:
super().__init__(*_a , **_a )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_a : Dict = None
if self.model.config.prefix is not None:
_a : List[Any] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_a : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_a , _a , _a : str = self._sanitize_parameters(prefix=_a , **self._forward_params )
_a : Optional[Any] = {**self._preprocess_params, **preprocess_params}
_a : List[Any] = {**self._forward_params, **forward_params}
def __lowercase ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> Optional[int]:
_a : List[Any] = {}
if prefix is not None:
_a : Optional[Any] = prefix
if prefix:
_a : Dict = self.tokenizer(
_a , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Tuple = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
_a : Dict = handle_long_generation
preprocess_params.update(_a )
_a : Tuple = generate_kwargs
_a : Any = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
_a : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
_a : Any = ReturnType.TENSORS
if return_type is not None:
_a : Any = return_type
if clean_up_tokenization_spaces is not None:
_a : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
_a : Tuple = self.tokenizer.encode(_a , add_special_tokens=_a )
if len(_a ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
_a : List[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowercase ( self , *_a , **_a ) -> Union[str, Any]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_a , **_a )
def __call__( self , _a , **_a ) -> List[str]:
return super().__call__(_a , **_a )
def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]:
_a : Optional[int] = self.tokenizer(
prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Union[str, Any] = prompt_text
if handle_long_generation == "hole":
_a : List[str] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_a : int = generate_kwargs['''max_new_tokens''']
else:
_a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_a : List[str] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
_a : List[Any] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_a : List[str] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def __lowercase ( self , _a , **_a ) -> Optional[int]:
_a : Any = model_inputs['''input_ids''']
_a : Optional[Any] = model_inputs.get('''attention_mask''' , _a )
# Allow empty prompts
if input_ids.shape[1] == 0:
_a : int = None
_a : int = None
_a : List[str] = 1
else:
_a : List[Any] = input_ids.shape[0]
_a : Union[str, Any] = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_a : int = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
_a : Tuple = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
_a : int = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_a : Dict = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_a : Optional[Any] = self.model.generate(input_ids=_a , attention_mask=_a , **_a )
_a : int = generated_sequence.shape[0]
if self.framework == "pt":
_a : Tuple = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_a : List[Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowercase ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> int:
_a : Tuple = model_outputs['''generated_sequence'''][0]
_a : int = model_outputs['''input_ids''']
_a : Any = model_outputs['''prompt_text''']
_a : Any = generated_sequence.numpy().tolist()
_a : Any = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_a : Optional[int] = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_a : str = self.tokenizer.decode(
_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_a : Union[str, Any] = 0
else:
_a : str = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) )
if return_type == ReturnType.FULL_TEXT:
_a : str = prompt_text + text[prompt_length:]
else:
_a : List[str] = text[prompt_length:]
_a : Union[str, Any] = {'''generated_text''': all_text}
records.append(_a )
return records
| 15 | 1 |
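# --- Illustrative sketch (not part of the dataset row above) ---
# The 'hole' strategy from the preprocess step above, in isolation: when the
# prompt plus the requested new tokens would overflow the model's context
# window, the oldest prompt tokens are dropped so that exactly
# model_max_length - max_new_tokens prompt tokens remain. The toy lengths
# below are hypothetical.
import torch

model_max_length = 10
max_new_tokens = 4
input_ids = torch.arange(12).unsqueeze(0)  # a 12-token prompt, too long

cur_len = input_ids.shape[-1]
if cur_len + max_new_tokens > model_max_length:
    keep_length = model_max_length - max_new_tokens
    input_ids = input_ids[:, -keep_length:]
print(input_ids.tolist())  # [[6, 7, 8, 9, 10, 11]] -> the 6 most recent tokens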
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> Any:
_a : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''embed_dim''' ) )
self.parent.assertTrue(hasattr(_a , '''num_heads''' ) )
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=6_4 , _a=3 , _a=[1_6, 4_8, 9_6] , _a=[1, 3, 6] , _a=[1, 2, 1_0] , _a=[7, 3, 3] , _a=[4, 2, 2] , _a=[2, 1, 1] , _a=[2, 2, 2] , _a=[False, False, True] , _a=[0.0, 0.0, 0.0] , _a=0.02 , _a=1e-1_2 , _a=True , _a=True , _a=2 , ) -> int:
_a : str = parent
_a : int = batch_size
_a : Optional[int] = image_size
_a : Union[str, Any] = patch_sizes
_a : Optional[int] = patch_stride
_a : str = patch_padding
_a : str = is_training
_a : List[Any] = use_labels
_a : List[Any] = num_labels
_a : int = num_channels
_a : Optional[Any] = embed_dim
_a : List[Any] = num_heads
_a : List[str] = stride_kv
_a : List[str] = depth
_a : str = cls_token
_a : Dict = attention_drop_rate
_a : Union[str, Any] = initializer_range
_a : List[Any] = layer_norm_eps
def __lowercase ( self ) -> Any:
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Tuple = None
if self.use_labels:
# create a random int32 tensor of given shape
_a : int = ids_tensor([self.batch_size] , self.num_labels )
_a : str = self.get_config()
return config, pixel_values, labels
def __lowercase ( self ) -> List[Any]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __lowercase ( self , _a , _a , _a ) -> List[str]:
_a : List[Any] = TFCvtModel(config=_a )
_a : Dict = model(_a , training=_a )
_a : Dict = (self.image_size, self.image_size)
_a , _a : Optional[Any] = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_a : Any = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_a : List[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __lowercase ( self , _a , _a , _a ) -> List[Any]:
_a : List[str] = self.num_labels
_a : str = TFCvtForImageClassification(_a )
_a : str = model(_a , labels=_a , training=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self ) -> List[Any]:
_a : Union[str, Any] = self.prepare_config_and_inputs()
_a , _a , _a : Optional[Any] = config_and_inputs
_a : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
UpperCAmelCase__ : Optional[Any] = (
{"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : int = False
UpperCAmelCase__ : int = False
def __lowercase ( self ) -> Union[str, Any]:
_a : Dict = TFCvtModelTester(self )
_a : Optional[int] = TFCvtConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=3_7 )
def __lowercase ( self ) -> Any:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''' )
def __lowercase ( self ) -> List[Any]:
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def __lowercase ( self ) -> str:
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
def __lowercase ( self ) -> Tuple:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def __lowercase ( self ) -> Optional[Any]:
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
def __lowercase ( self ) -> str:
_a : Dict = tf.keras.mixed_precision.Policy('''mixed_float16''' )
tf.keras.mixed_precision.set_global_policy(_a )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('''float32''' )
def __lowercase ( self ) -> Tuple:
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = model_class(_a )
_a : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Union[str, Any] = [*signature.parameters.keys()]
_a : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> Dict:
def check_hidden_states_output(_a , _a , _a ):
_a : str = model_class(_a )
_a : List[Any] = model(**self._prepare_for_class(_a , _a ) )
_a : Any = outputs.hidden_states
_a : Tuple = len(self.model_tester.depth )
self.assertEqual(len(_a ) , _a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Dict = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Dict = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Optional[int]:
_a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self ) -> Optional[Any]:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : List[Any] = TFCvtModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
_a : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> Tuple:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __lowercase ( self ) -> Tuple:
_a : Dict = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_a : Optional[int] = self.default_image_processor
_a : Tuple = prepare_img()
_a : Dict = image_processor(images=_a , return_tensors='''tf''' )
# forward pass
_a : Dict = model(**_a )
# verify the logits
_a : Optional[int] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : Tuple = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _a , atol=1e-4 ) )
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( __a : Dict=None ) -> str:
"""simple docstring"""
if subparsers is not None:
_a : Union[str, Any] = subparsers.add_parser('''test''' )
else:
_a : List[str] = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' ,default=__a ,help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) ,)
if subparsers is not None:
parser.set_defaults(func=__a )
return parser
def __UpperCAmelCase ( __a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Dict = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
_a : List[Any] = script_name
else:
_a : Union[str, Any] = F"""--config_file={args.config_file} {script_name}"""
_a : str = ['''accelerate-launch'''] + test_args.split()
_a : str = execute_subprocess_async(__a ,env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = test_command_parser()
_a : List[Any] = parser.parse_args()
test_command(__a )
if __name__ == "__main__":
main()
| 15 | 1 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=9_9 , _a=6_4 , _a=3_2 , _a=5 , _a=4 , _a=3_7 , _a="gelu" , _a=0.1 , _a=0.1 , _a=5_1_2 , _a=1_6 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> Optional[Any]:
_a : List[str] = parent
_a : int = batch_size
_a : Union[str, Any] = seq_length
_a : Dict = is_training
_a : List[Any] = use_input_mask
_a : str = use_token_type_ids
_a : List[str] = use_labels
_a : Tuple = vocab_size
_a : List[Any] = hidden_size
_a : Dict = embedding_size
_a : Dict = num_hidden_layers
_a : List[Any] = num_attention_heads
_a : Tuple = intermediate_size
_a : Any = hidden_act
_a : Optional[int] = hidden_dropout_prob
_a : Dict = attention_probs_dropout_prob
_a : Any = max_position_embeddings
_a : List[Any] = type_vocab_size
_a : Optional[Any] = type_sequence_label_size
_a : Optional[Any] = initializer_range
_a : Union[str, Any] = num_labels
_a : List[str] = num_choices
_a : Union[str, Any] = scope
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Dict = None
if self.use_input_mask:
_a : Any = random_attention_mask([self.batch_size, self.seq_length] )
_a : Union[str, Any] = None
if self.use_token_type_ids:
_a : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : Tuple = None
_a : str = None
_a : str = None
if self.use_labels:
_a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : Any = ids_tensor([self.batch_size] , self.num_choices )
_a : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self ) -> Optional[Any]:
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]:
_a : int = MobileBertModel(config=_a )
model.to(_a )
model.eval()
_a : Dict = model(_a , attention_mask=_a , token_type_ids=_a )
_a : Union[str, Any] = model(_a , token_type_ids=_a )
_a : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> Any:
_a : Optional[int] = MobileBertForMaskedLM(config=_a )
model.to(_a )
model.eval()
_a : int = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> Any:
_a : Any = MobileBertForNextSentencePrediction(config=_a )
model.to(_a )
model.eval()
_a : Union[str, Any] = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> Tuple:
_a : Optional[int] = MobileBertForPreTraining(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , next_sentence_label=_a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> int:
_a : List[Any] = MobileBertForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
_a : Union[str, Any] = model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]:
_a : Any = self.num_labels
_a : Optional[int] = MobileBertForSequenceClassification(_a )
model.to(_a )
model.eval()
_a : int = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> Dict:
_a : int = self.num_labels
_a : str = MobileBertForTokenClassification(config=_a )
model.to(_a )
model.eval()
_a : int = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]:
_a : str = self.num_choices
_a : Tuple = MobileBertForMultipleChoice(config=_a )
model.to(_a )
model.eval()
_a : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a : Optional[int] = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowercase ( self ) -> List[Any]:
_a : str = self.prepare_config_and_inputs()
        _a , _a , _a , _a , _a , _a , _a : Any = config_and_inputs
_a : Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : int = True
def __lowercase ( self , _a , _a , _a=False ) -> int:
_a : Tuple = super()._prepare_for_class(_a , _a , return_labels=_a )
if return_labels:
if model_class in get_values(_a ):
_a : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_a )
_a : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_a )
return inputs_dict
def __lowercase ( self ) -> Optional[int]:
_a : Optional[int] = MobileBertModelTester(self )
_a : Tuple = ConfigTester(self , config_class=_a , hidden_size=3_7 )
def __lowercase ( self ) -> Dict:
self.config_tester.run_common_tests()
def __lowercase ( self ) -> List[Any]:
_a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_a )
def __lowercase ( self ) -> Union[str, Any]:
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_a )
def __lowercase ( self ) -> Dict:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_a )
def __lowercase ( self ) -> Dict:
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_a )
def __lowercase ( self ) -> Optional[int]:
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_a )
def __lowercase ( self ) -> Optional[Any]:
_a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_a )
def __lowercase ( self ) -> int:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_a )
def __lowercase ( self ) -> str:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_a )
def __UpperCAmelCase ( __a : Any ) -> Union[str, Any]:
"""simple docstring"""
return torch.tensor(
__a ,dtype=torch.long ,device=__a ,)
a__ = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self ) -> str:
_a : Any = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(_a )
_a : int = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
_a : Tuple = model(_a )[0]
_a : List[Any] = torch.Size((1, 9, 5_1_2) )
self.assertEqual(output.shape , _a )
_a : Optional[int] = torch.tensor(
[
[
[-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5],
[-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0],
[2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1],
]
] , device=_a , )
        # MobileBERT outputs range from 10e0 to 10e8. Even a 0.0000001% relative difference on a value of 10e8
        # yields an absolute difference of ~1, so comparing with an absolute tolerance is not a good idea.
        # Instead, we divide the expected result by the actual result so that the quotient is ~1, then check
        # that the quotient stays within bounds: 1 - TOLERANCE <= expected_result / result <= 1 + TOLERANCE
        # (a standalone sketch of this check is added after the class below).
_a : str = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
_a : Optional[int] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
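# Added sketch (not part of the original test): the relative-tolerance check
# above, factored into a standalone helper. Assumes ``expected`` and ``actual``
# are broadcast-compatible tensors and that ``actual`` has no zero entries; the
# default tolerance mirrors the 1E-3 constant defined above.
def _within_relative_tolerance(expected, actual, tolerance=1e-3):
    ratio = expected / actual  # quotients near 1 mean the tensors agree
    return bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance))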
| 15 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[Any] = tempfile.mkdtemp()
# fmt: off
_a : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_a : Any = {
'''do_resize''': True,
'''size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_a : str = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __lowercase ( self , **_a ) -> Any:
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , **_a ) -> str:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_a : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self ) -> str:
_a : List[str] = self.get_tokenizer()
_a : Tuple = self.get_image_processor()
_a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Dict:
_a : List[str] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a : List[Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Any:
_a : Dict = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : int = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[str] = self.prepare_image_inputs()
_a : List[Any] = image_processor(_a , return_tensors='''np''' )
_a : Dict = processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = self.get_tokenizer()
_a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Tuple = '''lower newer'''
_a : int = processor(text=_a )
_a : str = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self ) -> List[Any]:
_a : Any = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : Tuple = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[Any] = '''lower newer'''
_a : Union[str, Any] = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_a ):
processor()
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Any = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : int = processor.batch_decode(_a )
_a : int = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __lowercase ( self ) -> List[Any]:
_a : Tuple = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Optional[int] = '''lower newer'''
_a : Dict = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 15 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a__ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 15 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
a__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : Optional[int] ,__a : List[str] ,__a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for attribute in key.split('''.''' ):
_a : Optional[Any] = getattr(__a ,__a )
if weight_type is not None:
_a : Dict = getattr(__a ,__a ).shape
else:
_a : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_a : List[Any] = value
elif weight_type == "weight_g":
_a : Any = value
elif weight_type == "weight_v":
_a : Union[str, Any] = value
elif weight_type == "bias":
_a : Optional[int] = value
else:
_a : List[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __UpperCAmelCase ( __a : Any ,__a : Union[str, Any] ,__a : Union[str, Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = []
_a : Union[str, Any] = fairseq_model.state_dict()
_a : Union[str, Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_a : int = False
if "conv_layers" in name:
load_conv_layer(
__a ,__a ,__a ,__a ,hf_model.config.feat_extract_norm == '''group''' ,)
_a : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
_a : Union[str, Any] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
_a : Any = True
if "*" in mapped_key:
_a : Optional[int] = name.split(__a )[0].split('''.''' )[-2]
_a : Any = mapped_key.replace('''*''' ,__a )
if "weight_g" in name:
_a : List[Any] = '''weight_g'''
elif "weight_v" in name:
_a : List[str] = '''weight_v'''
elif "weight" in name:
_a : Any = '''weight'''
elif "bias" in name:
_a : str = '''bias'''
else:
_a : Any = None
set_recursively(__a ,__a ,__a ,__a ,__a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __UpperCAmelCase ( __a : int ,__a : Optional[Any] ,__a : Dict ,__a : List[str] ,__a : Any ) -> Tuple:
"""simple docstring"""
_a : int = full_name.split('''conv_layers.''' )[-1]
_a : Any = name.split('''.''' )
_a : List[Any] = int(items[0] )
_a : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_a : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_a : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_a : int = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_a : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def __UpperCAmelCase ( __a : Dict ,__a : List[Any] ,__a : List[str]=None ,__a : Optional[int]=None ,__a : int=True ) -> List[Any]:
"""simple docstring"""
if config_path is not None:
_a : Tuple = HubertConfig.from_pretrained(__a )
else:
_a : Any = HubertConfig()
if is_finetuned:
if dict_path:
_a : Tuple = Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_a : Any = target_dict.pad_index
_a : Tuple = target_dict.bos_index
_a : Optional[int] = target_dict.eos_index
_a : Optional[Any] = len(target_dict.symbols )
_a : Tuple = os.path.join(__a ,'''vocab.json''' )
if not os.path.isdir(__a ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__a ) )
return
os.makedirs(__a ,exist_ok=__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices ,__a )
_a : Tuple = WavaVecaCTCTokenizer(
__a ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=__a ,)
_a : Tuple = True if config.feat_extract_norm == '''layer''' else False
_a : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__a ,return_attention_mask=__a ,)
_a : List[Any] = WavaVecaProcessor(feature_extractor=__a ,tokenizer=__a )
processor.save_pretrained(__a )
_a : Tuple = HubertForCTC(__a )
else:
_a : Tuple = HubertModel(__a )
if is_finetuned:
_a , _a , _a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_a , _a , _a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_a : Any = model[0].eval()
recursively_load_weights(__a ,__a ,__a )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
a__ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 15 | 1 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ = '''▁'''
a__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = BigBirdTokenizer
UpperCAmelCase__ : List[str] = BigBirdTokenizerFast
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : List[str] = True
def __lowercase ( self ) -> Optional[int]:
super().setUp()
_a : Dict = self.tokenizer_class(_a , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self ) -> int:
_a : Any = '''<s>'''
_a : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __lowercase ( self ) -> int:
_a : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(_a ) , 1_0_0_4 )
def __lowercase ( self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def __lowercase ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
_a : Tuple = self.get_tokenizer()
_a : Tuple = self.get_rust_tokenizer()
_a : Dict = '''I was born in 92000, and this is falsé.'''
_a : List[Any] = tokenizer.tokenize(_a )
_a : Any = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_a : Optional[int] = tokenizer.encode(_a , add_special_tokens=_a )
_a : int = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : List[Any] = self.get_rust_tokenizer()
_a : Any = tokenizer.encode(_a )
_a : Optional[int] = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
_a : List[Any] = BigBirdTokenizer(_a , keep_accents=_a )
_a : Tuple = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
_a : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_a : str = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __lowercase ( self ) -> List[str]:
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def __lowercase ( self ) -> str:
_a : List[Any] = '''Hello World!'''
_a : Optional[int] = [6_5, 1_8_5_3_6, 2_2_6_0, 1_0_1, 6_6]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def __lowercase ( self ) -> List[Any]:
_a : Any = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
_a : List[Any] = [6_5, 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, 6_6] # noqa: E231
# fmt: on
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@require_torch
@slow
def __lowercase ( self ) -> str:
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
_a : List[str] = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
_a : Optional[Any] = ''' '''.join(_a )
_a : Any = self.big_tokenizer.encode_plus(_a , return_tensors='''pt''' , return_token_type_ids=_a )
_a : List[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_a )
_a : str = BigBirdConfig(attention_type='''original_full''' )
_a : str = BigBirdModel(_a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_a )
model(**_a )
@slow
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
_a : Tuple = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def __lowercase ( self ) -> Optional[int]:
# fmt: off
_a : Dict = {'''input_ids''': [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 15 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = ["image_processor", "tokenizer"]
UpperCAmelCase__ : str = "ViltImageProcessor"
UpperCAmelCase__ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _a=None , _a=None , **_a ) -> Any:
_a : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
_a : Dict = kwargs.pop('''feature_extractor''' )
_a : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
_a : int = self.image_processor
def __call__( self , _a , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
_a : Tuple = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel_values + pixel_mask
_a : str = self.image_processor(_a , return_tensors=_a )
encoding.update(_a )
return encoding
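    # Note (added): the encoding returned above merges the tokenizer outputs
    # (input_ids, token_type_ids, attention_mask) with the image processor
    # outputs (pixel_values and, when produced, pixel_mask).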
def __lowercase ( self , *_a , **_a ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_a , **_a )
def __lowercase ( self , *_a , **_a ) -> str:
return self.tokenizer.decode(*_a , **_a )
@property
def __lowercase ( self ) -> Optional[int]:
_a : str = self.tokenizer.model_input_names
_a : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowercase ( self ) -> Optional[Any]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __lowercase ( self ) -> Any:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
| 15 | 1 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
a__ = logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "AutoTokenizer"
UpperCAmelCase__ : int = ["tokenizer"]
UpperCAmelCase__ : int = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self , _a , _a=None ) -> Dict:
super().__init__(_a )
_a : List[Any] = speaker_embeddings
@classmethod
def __lowercase ( cls , _a , _a="speaker_embeddings_path.json" , **_a ) -> Union[str, Any]:
if speaker_embeddings_dict_path is not None:
_a : List[Any] = get_file_from_repo(
_a , _a , subfolder=kwargs.pop('''subfolder''' , _a ) , cache_dir=kwargs.pop('''cache_dir''' , _a ) , force_download=kwargs.pop('''force_download''' , _a ) , proxies=kwargs.pop('''proxies''' , _a ) , resume_download=kwargs.pop('''resume_download''' , _a ) , local_files_only=kwargs.pop('''local_files_only''' , _a ) , use_auth_token=kwargs.pop('''use_auth_token''' , _a ) , revision=kwargs.pop('''revision''' , _a ) , )
if speaker_embeddings_path is None:
logger.warning(
F"""`{os.path.join(_a , _a )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
_a : Optional[int] = None
else:
with open(_a ) as speaker_embeddings_json:
_a : Tuple = json.load(_a )
else:
_a : Dict = None
_a : List[Any] = AutoTokenizer.from_pretrained(_a , **_a )
return cls(tokenizer=_a , speaker_embeddings=_a )
def __lowercase ( self , _a , _a="speaker_embeddings_path.json" , _a="speaker_embeddings" , _a = False , **_a , ) -> List[str]:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_a , _a , '''v2''' ) , exist_ok=_a )
_a : Any = {}
_a : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_a : Union[str, Any] = self._load_voice_preset(_a )
_a : List[Any] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , _a , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=_a , )
_a : int = os.path.join(_a , F"""{prompt_key}_{key}.npy""" )
_a : Any = tmp_dict
with open(os.path.join(_a , _a ) , '''w''' ) as fp:
json.dump(_a , _a )
super().save_pretrained(_a , _a , **_a )
def __lowercase ( self , _a = None , **_a ) -> Optional[int]:
_a : Any = self.speaker_embeddings[voice_preset]
_a : int = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
_a : Optional[int] = get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , _a ) , cache_dir=kwargs.pop('''cache_dir''' , _a ) , force_download=kwargs.pop('''force_download''' , _a ) , proxies=kwargs.pop('''proxies''' , _a ) , resume_download=kwargs.pop('''resume_download''' , _a ) , local_files_only=kwargs.pop('''local_files_only''' , _a ) , use_auth_token=kwargs.pop('''use_auth_token''' , _a ) , revision=kwargs.pop('''revision''' , _a ) , )
if path is None:
raise ValueError(
F"""`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.""" )
_a : int = np.load(_a )
return voice_preset_dict
def __lowercase ( self , _a = None ) -> Any:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self , _a=None , _a=None , _a="pt" , _a=2_5_6 , _a=False , _a=True , _a=False , **_a , ) -> Dict:
if voice_preset is not None and not isinstance(_a , _a ):
if (
isinstance(_a , _a )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_a : Optional[Any] = self._load_voice_preset(_a )
else:
if isinstance(_a , _a ) and not voice_preset.endswith('''.npz''' ):
_a : List[Any] = voice_preset + '''.npz'''
_a : Union[str, Any] = np.load(_a )
if voice_preset is not None:
self._validate_voice_preset_dict(_a , **_a )
_a : Dict = BatchFeature(data=_a , tensor_type=_a )
_a : List[str] = self.tokenizer(
_a , return_tensors=_a , padding='''max_length''' , max_length=_a , return_attention_mask=_a , return_token_type_ids=_a , add_special_tokens=_a , **_a , )
if voice_preset is not None:
_a : Optional[Any] = voice_preset
return encoded_text
| 15 |
from math import ceil
def __UpperCAmelCase ( __a : int = 1_001 ) -> int:
"""simple docstring"""
_a : Dict = 1
for i in range(1 ,int(ceil(n / 2.0 ) ) ):
_a : int = 2 * i + 1
_a : str = 2 * i
_a : Any = total + 4 * odd**2 - 6 * even
return total
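# Derivation note (added): the four corners of spiral ring i are (2*i+1)**2,
# (2*i+1)**2 - 2*i, (2*i+1)**2 - 4*i and (2*i+1)**2 - 6*i; their sum is
# 4*(2*i+1)**2 - 12*i = 4 * odd**2 - 6 * even, which is the per-ring update above.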
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
a__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 15 | 1 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __UpperCAmelCase ( __a : str = "isbn/0140328726" ) -> dict:
"""simple docstring"""
_a : str = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
_a : Union[str, Any] = F"""{olid} is not a valid Open Library olid"""
raise ValueError(__a )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
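# Usage sketch (added; the olid shown is this function's own default):
# get_openlibrary_data('''isbn/0140328726''') issues a GET to
# https://openlibrary.org/isbn/0140328726.json and returns the decoded dict.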
def __UpperCAmelCase ( __a : dict ) -> dict:
"""simple docstring"""
_a : Tuple = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
_a : Optional[Any] = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
_a : Any = [
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
_a : str = data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(__a ,__a ):
_a : Any = ''', '''.join(__a )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
a__ = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
a__ = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('''\n'''.join(f'''{key}: {value}''' for key, value in book_summary.items()))
        except JSONDecodeError: # Workaround for requests.exceptions.RequestException
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 15 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : str ,__a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
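# Worked example (added): normalize_box maps a pixel box onto the 0-1000 grid
# the model expects. For a 1000 x 2000 (width x height) image:
# normalize_box([10, 20, 110, 220], 1000, 2000) == [10, 10, 110, 110]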
def __UpperCAmelCase ( __a : np.ndarray ,__a : Optional[str] ,__a : Optional[str] ) -> List[Any]:
"""simple docstring"""
_a : str = to_pil_image(__a )
_a , _a : Optional[Any] = pil_image.size
_a : Tuple = pytesseract.image_to_data(__a ,lang=__a ,output_type='''dict''' ,config=__a )
_a , _a , _a , _a , _a : List[str] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
_a : Dict = [idx for idx, word in enumerate(__a ) if not word.strip()]
_a : str = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices]
_a : List[str] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : str = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_a : int = []
for x, y, w, h in zip(__a ,__a ,__a ,__a ):
_a : List[str] = [x, y, x + w, y + h]
actual_boxes.append(__a )
# finally, normalize the bounding boxes
_a : Dict = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(__a ,__a ,__a ) )
assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
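# Note (added): apply_tesseract returns parallel lists: one recognized word per
# normalized (left, top, right, bottom) box produced by normalize_box above.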
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None:
super().__init__(**_a )
_a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_a : Union[str, Any] = get_size_dict(_a )
_a : int = do_resize
_a : Optional[int] = size
_a : str = resample
_a : str = do_rescale
_a : Any = rescale_value
_a : Optional[Any] = do_normalize
_a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
_a : List[Any] = apply_ocr
_a : Optional[int] = ocr_lang
_a : Tuple = tesseract_config
def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray:
_a : Any = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_a : Optional[int] = (size['''height'''], size['''width'''])
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_a : Optional[int] = do_resize if do_resize is not None else self.do_resize
_a : Union[str, Any] = size if size is not None else self.size
_a : Any = get_size_dict(_a )
_a : List[str] = resample if resample is not None else self.resample
_a : int = do_rescale if do_rescale is not None else self.do_rescale
_a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : int = do_normalize if do_normalize is not None else self.do_normalize
_a : str = image_mean if image_mean is not None else self.image_mean
_a : Tuple = image_std if image_std is not None else self.image_std
_a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
_a : int = ocr_lang if ocr_lang is not None else self.ocr_lang
_a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
_a : List[Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
_a : Any = [to_numpy_array(_a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
_a : str = []
_a : str = []
for image in images:
_a , _a : Union[str, Any] = apply_tesseract(_a , _a , _a )
words_batch.append(_a )
boxes_batch.append(_a )
if do_resize:
_a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
_a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images]
_a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a )
if apply_ocr:
_a : Optional[int] = words_batch
_a : List[Any] = boxes_batch
return data
| 15 | 1 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
a__ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
a__ = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
a__ = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
a__ = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
a__ = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
a__ = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
    ('''image-to-text''', '''MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def __UpperCAmelCase ( __a : Optional[Any] ) -> Dict:
"""simple docstring"""
_a : List[Any] = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' ,__a )
return [m.group(0 ) for m in matches]
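# Worked example (added): the regex splits on lower->upper and acronym->word
# boundaries, e.g. camel_case_split('''MobileBertForMaskedLM''') yields
# ['''Mobile''', '''Bert''', '''For''', '''Masked''', '''LM'''].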
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
_a : Union[str, Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_a : Any = {
config.replace('''Config''' ,'''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
_a : List[Any] = collections.defaultdict(__a )
_a : int = collections.defaultdict(__a )
_a : Union[str, Any] = collections.defaultdict(__a )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(__a ):
_a : Union[str, Any] = None
if _re_tf_models.match(__a ) is not None:
_a : Any = tf_models
_a : str = _re_tf_models.match(__a ).groups()[0]
elif _re_flax_models.match(__a ) is not None:
_a : List[str] = flax_models
_a : str = _re_flax_models.match(__a ).groups()[0]
elif _re_pt_models.match(__a ) is not None:
_a : List[str] = pt_models
_a : Dict = _re_pt_models.match(__a ).groups()[0]
if lookup_dict is not None:
while len(__a ) > 0:
if attr_name in model_prefix_to_model_type:
_a : Union[str, Any] = True
break
# Try again after removing the last word in the name
_a : List[str] = ''''''.join(camel_case_split(__a )[:-1] )
_a : int = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
_a : Optional[int] = list(__a )
all_models.sort()
_a : Any = {'''model_type''': all_models}
_a : Optional[Any] = [pt_models[t] for t in all_models]
_a : List[str] = [tf_models[t] for t in all_models]
_a : Optional[Any] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure each model type is paired with its preprocessor class.
_a : Dict = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
_a : List[str] = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
_a : Optional[Any] = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
_a : List[str] = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
_a : str = '''AutoTokenizer'''
_a : Union[str, Any] = [processors[t] for t in all_models]
return pd.DataFrame(__a )
def __UpperCAmelCase ( __a : str ) -> str:
"""simple docstring"""
_a : Any = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
_a : List[Any] = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""]
_a : int = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(__a ,__a ,__a ):
# The type of pipeline may not exist in this framework
if not hasattr(__a ,__a ):
continue
# First extract all model_names
_a : Union[str, Any] = []
for name in getattr(__a ,__a ).values():
if isinstance(__a ,__a ):
model_names.append(__a )
else:
model_names.extend(list(__a ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : List[str] ) -> Optional[Any]:
"""simple docstring"""
_a : int = get_frameworks_table()
_a : Optional[int] = Dataset.from_pandas(__a )
_a : str = hf_hub_download(
'''huggingface/transformers-metadata''' ,'''pipeline_tags.json''' ,repo_type='''dataset''' ,token=__a )
_a : Tuple = Dataset.from_json(__a )
_a : Optional[int] = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(__a ) )
}
_a : Union[str, Any] = update_pipeline_and_auto_class_table(__a )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
_a : Dict = sorted(table.keys() )
_a : str = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
_a : Any = Dataset.from_pandas(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(__a ,'''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(__a ,'''pipeline_tags.json''' ) )
if commit_sha is not None:
_a : Dict = (
F"""Update with commit {commit_sha}\n\nSee: """
F"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
_a : int = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' ,folder_path=__a ,repo_type='''dataset''' ,token=__a ,commit_message=__a ,)
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
_a : Any = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
_a : Union[str, Any] = transformers_module.pipelines.SUPPORTED_TASKS
_a : Optional[Any] = []
for key in pipeline_tasks:
if key not in in_table:
_a : Optional[Any] = pipeline_tasks[key]['''pt''']
if isinstance(__a ,(list, tuple) ):
_a : Union[str, Any] = model[0]
_a : Any = model.__name__
if model not in in_table.values():
missing.append(__a )
if len(__a ) > 0:
_a : Optional[int] = ''', '''.join(__a )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
F"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
a__ = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    """Entry point of the `accelerate` CLI: register all subcommands, then dispatch."""
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
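# The subcommands registered in main() map onto the usual accelerate CLI flow:
#   accelerate config                     # interactive setup, stores a default config
#   accelerate test                       # smoke-test the stored config
#   accelerate launch my_script.py --foo  # dispatch a training script through launch_command_parser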
| 15 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of the scheduler's step functions."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling from Karras et al. (2022), tailored to variance-expanding models."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ) -> None:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
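# A minimal standalone sketch of the sigma schedule built in set_timesteps above:
# sigma**2 is interpolated geometrically between sigma_max**2 and sigma_min**2.
# The constants are the config defaults; nothing here depends on diffusers.
if __name__ == "__main__":
    sigma_min, sigma_max, steps = 0.02, 100.0, 5
    ratio = sigma_min**2 / sigma_max**2
    demo_schedule = [sigma_max**2 * ratio ** (i / (steps - 1)) for i in range(steps)]
    assert abs(demo_schedule[0] - sigma_max**2) < 1e-9
    assert abs(demo_schedule[-1] - sigma_min**2) < 1e-9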
| 15 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a__ = random.Random()
def __UpperCAmelCase ( __a : Tuple ,__a : str=1.0 ,__a : Optional[int]=None ,__a : List[Any]=None ) -> Any:
"""simple docstring"""
if rng is None:
_a : Dict = global_rng
_a : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _a , _a=7 , _a=4_0_0 , _a=2_0_0_0 , _a=2_0_4_8 , _a=1_2_8 , _a=1 , _a=5_1_2 , _a=3_0 , _a=4_4_1_0_0 , ) -> List[Any]:
_a : Optional[Any] = parent
_a : str = batch_size
_a : List[str] = min_seq_length
_a : str = max_seq_length
_a : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a : List[Any] = spectrogram_length
_a : List[str] = feature_size
_a : List[Any] = num_audio_channels
_a : Tuple = hop_length
_a : Optional[int] = chunk_length
_a : int = sampling_rate
def __lowercase ( self ) -> Union[str, Any]:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __lowercase ( self , _a=False , _a=False ) -> List[Any]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_a : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a : List[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a : str = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = TvltFeatureExtractor
def __lowercase ( self ) -> Dict:
_a : List[str] = TvltFeatureExtractionTester(self )
def __lowercase ( self ) -> Any:
_a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_a , '''feature_size''' ) )
self.assertTrue(hasattr(_a , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_a , '''hop_length''' ) )
self.assertTrue(hasattr(_a , '''chunk_length''' ) )
self.assertTrue(hasattr(_a , '''sampling_rate''' ) )
def __lowercase ( self ) -> Optional[int]:
_a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_a : Dict = self.feature_extraction_class.from_pretrained(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Union[str, Any] = feat_extract_second.to_dict()
_a : Any = dict_first.pop('''mel_filters''' )
_a : int = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Optional[int]:
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = os.path.join(_a , '''feat_extract.json''' )
feat_extract_first.to_json_file(_a )
_a : List[str] = self.feature_extraction_class.from_json_file(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Dict = feat_extract_second.to_dict()
_a : str = dict_first.pop('''mel_filters''' )
_a : str = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
# Initialize feature_extractor
_a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_a : Union[str, Any] = feature_extractor(
_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_a : int = np.asarray(_a )
_a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __lowercase ( self ) -> int:
_a : Union[str, Any] = self._load_datasamples(1 )
_a : int = TvltFeatureExtractor()
_a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
_a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 15 | 1 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __UpperCAmelCase ( __a : List[str] ) -> int:
"""simple docstring"""
_a : List[Any] = args.pruning_method
_a : Tuple = args.threshold
_a : str = args.model_name_or_path.rstrip('''/''' )
_a : int = args.target_model_path
print(F"""Load fine-pruned model from {model_name_or_path}""" )
_a : List[str] = torch.load(os.path.join(__a ,'''pytorch_model.bin''' ) )
_a : int = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_a : Union[str, Any] = tensor
print(F"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
_a : int = tensor
print(F"""Copied layer {name}""" )
elif "bias" in name:
_a : Tuple = tensor
print(F"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
_a : List[str] = MagnitudeBinarizer.apply(inputs=__a ,threshold=__a )
_a : Union[str, Any] = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_a : Any = name[:-6]
_a : Optional[int] = model[F"""{prefix_}mask_scores"""]
_a : Dict = TopKBinarizer.apply(__a ,__a )
_a : Any = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_a : Dict = name[:-6]
_a : Union[str, Any] = model[F"""{prefix_}mask_scores"""]
_a : List[str] = ThresholdBinarizer.apply(__a ,__a ,__a )
_a : str = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_a : Optional[Any] = name[:-6]
_a : Dict = model[F"""{prefix_}mask_scores"""]
_a , _a : Optional[int] = -0.1, 1.1
_a : Optional[Any] = torch.sigmoid(__a )
_a : Tuple = s * (r - l) + l
_a : str = s_bar.clamp(min=0.0 ,max=1.0 )
_a : List[str] = tensor * mask
print(F"""Pruned layer {name}""" )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
_a : Optional[int] = os.path.join(
os.path.dirname(__a ) ,F"""bertarized_{os.path.basename(__a )}""" )
if not os.path.isdir(__a ):
shutil.copytree(__a ,__a )
print(F"""\nCreated folder {target_model_path}""" )
torch.save(__a ,os.path.join(__a ,'''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
a__ = parser.parse_args()
main(args)
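# Example invocation (hedged: the checkpoint path is a placeholder; the flags match
# the argparse definition above):
#   python bertarize.py \
#       --pruning_method topK \
#       --threshold 0.10 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model
# With topK and threshold 0.10 this keeps the 10% highest-scoring weights per layer
# and, absent --target_model_path, writes bertarized_fine_pruned_model/pytorch_model.bin.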
| 15 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
a__ = logging.get_logger(__name__)
@add_end_docstrings(
__lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self , _a ) -> np.ndarray:
if self.framework == "tf":
_a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __lowercase ( self , _a ) -> np.ndarray:
_a : int = self.get_masked_index(_a )
_a : Tuple = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def __lowercase ( self , _a ) -> Optional[int]:
if isinstance(_a , _a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_a )
def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]:
if return_tensors is None:
_a : Union[str, Any] = self.framework
_a : str = self.tokenizer(_a , return_tensors=_a )
self.ensure_exactly_one_mask_token(_a )
return model_inputs
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[str] = self.model(**_a )
_a : Any = model_inputs['''input_ids''']
return model_outputs
def __lowercase ( self , _a , _a=5 , _a=None ) -> str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
_a : List[Any] = target_ids.shape[0]
_a : Any = model_outputs['''input_ids'''][0]
_a : List[str] = model_outputs['''logits''']
if self.framework == "tf":
_a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_a : List[str] = outputs.numpy()
_a : Dict = outputs[0, masked_index, :]
_a : str = stable_softmax(_a , axis=-1 )
if target_ids is not None:
_a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) )
_a : Union[str, Any] = tf.expand_dims(_a , 0 )
_a : Optional[int] = tf.math.top_k(_a , k=_a )
_a , _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy()
else:
_a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_a : List[str] = outputs[0, masked_index, :]
_a : List[Any] = logits.softmax(dim=-1 )
if target_ids is not None:
_a : List[Any] = probs[..., target_ids]
_a , _a : Optional[Any] = probs.topk(_a )
_a : Dict = []
_a : List[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
_a : Optional[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
_a : Optional[int] = input_ids.numpy().copy()
if target_ids is not None:
_a : Tuple = target_ids[p].tolist()
_a : List[str] = p
# Filter padding out:
_a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a )
_a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_a )
result.append(_a )
if single_mask:
return result[0]
return result
def __lowercase ( self , _a , _a=None ) -> Dict:
if isinstance(_a , _a ):
_a : Tuple = [targets]
try:
_a : int = self.tokenizer.get_vocab()
except Exception:
_a : Any = {}
_a : List[Any] = []
for target in targets:
_a : List[Any] = vocab.get(_a , _a )
if id_ is None:
_a : Tuple = self.tokenizer(
_a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )['''input_ids''']
if len(_a ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
_a : Tuple = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
_a : List[str] = list(set(_a ) )
if len(_a ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
_a : int = np.array(_a )
return target_ids
def __lowercase ( self , _a=None , _a=None ) -> Tuple:
_a : str = {}
if targets is not None:
_a : List[Any] = self.get_target_ids(_a , _a )
_a : Optional[Any] = target_ids
if top_k is not None:
_a : Union[str, Any] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , _a , *_a , **_a ) -> int:
_a : Optional[Any] = super().__call__(_a , **_a )
if isinstance(_a , _a ) and len(_a ) == 1:
return outputs[0]
return outputs
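# Hedged usage sketch for the pipeline class above, going through the public
# `pipeline` factory; the checkpoint name is an assumption, any fill-mask model works.
if __name__ == "__main__":
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilroberta-base")
    preds = unmasker("Paris is the <mask> of France.", top_k=2)
    # Each prediction carries the keys built in postprocess above:
    # "score", "token", "token_str" and "sequence".
    for pred in preds:
        print(pred["token_str"], pred["score"])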
| 15 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
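# Hedged sketch of what the lazy structure above buys: importing the package is cheap,
# and the heavy torch/tf/flax modeling files load only on first attribute access.
# from transformers.models import distilbert   # fast, no backend import yet
# model_cls = distilbert.DistilBertForSequenceClassification   # now modeling_distilbert loads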
| 15 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
a__ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
a__ = logging.getLogger()
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_a : Any = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_a : Dict = parser.parse_args()
return args.f
def __UpperCAmelCase ( __a : Optional[int] ,__a : List[str]="eval" ) -> Any:
"""simple docstring"""
_a : Any = os.path.join(__a ,F"""{split}_results.json""" )
if os.path.exists(__a ):
with open(__a ,'''r''' ) as f:
return json.load(__a )
raise ValueError(F"""can't find {path}""" )
a__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
_a : Any = self.get_auto_remove_tmp_dir()
_a : Optional[Any] = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_glue.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __lowercase ( self ) -> Dict:
_a : Tuple = self.get_auto_remove_tmp_dir()
_a : Tuple = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_clm_flax.main()
_a : List[str] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 1_0_0 )
@slow
def __lowercase ( self ) -> Optional[int]:
_a : str = self.get_auto_remove_tmp_dir()
_a : Optional[int] = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(_a , '''argv''' , _a ):
run_summarization_flax.main()
_a : Optional[int] = get_results(_a , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __lowercase ( self ) -> Tuple:
_a : List[str] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 4_2 )
@slow
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = self.get_auto_remove_tmp_dir()
_a : int = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
            run_t5_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __lowercase ( self ) -> Optional[Any]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
_a : Any = 7 if get_gpu_count() > 1 else 2
_a : List[Any] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_ner.main()
_a : Dict = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __lowercase ( self ) -> Any:
_a : Optional[int] = self.get_auto_remove_tmp_dir()
_a : Union[str, Any] = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_qa.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
| 15 | 1 |
def solution(n: int = 1_000) -> int:
    """Sum r_max = 2 * a * ((a - 1) // 2) over 3 <= a <= n."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
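# Worked example for solution above (hand-checked):
#   a=3 -> 2*3*1 = 6, a=4 -> 2*4*1 = 8, a=5 -> 2*5*2 = 20, so solution(5) == 34.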
| 15 |
import argparse
import os
import re
import packaging.version
a__ = '''examples/'''
a__ = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
a__ = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
a__ = '''README.md'''
def __UpperCAmelCase ( __a : List[str] ,__a : int ,__a : Optional[Any] ) -> int:
"""simple docstring"""
with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Tuple = f.read()
_a , _a : str = REPLACE_PATTERNS[pattern]
_a : List[str] = replace.replace('''VERSION''' ,__a )
_a : List[Any] = re_pattern.sub(__a ,__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.write(__a )
def __UpperCAmelCase ( __a : Any ) -> List[Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(__a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__a ,__a ) ,__a ,pattern='''examples''' )
def __UpperCAmelCase ( __a : List[Any] ,__a : List[str]=False ) -> int:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__a ,__a ,__a )
if not patch:
update_version_in_examples(__a )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
_a : Optional[Any] = '''🤗 Transformers currently provides the following architectures'''
_a : str = '''1. Want to contribute a new model?'''
with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Optional[int] = f.readlines()
# Find the start of the list.
_a : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_a : List[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
_a : Tuple = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' ,'''https://huggingface.co/docs/transformers/model_doc''' ,)
index += 1
with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.writelines(__a )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
with open(REPLACE_FILES['''init'''] ,'''r''' ) as f:
_a : Optional[Any] = f.read()
_a : Optional[Any] = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0]
return packaging.version.parse(__a )
def __UpperCAmelCase ( __a : Dict=False ) -> str:
"""simple docstring"""
_a : Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
_a : List[Any] = default_version.base_version
elif patch:
_a : str = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_a : List[str] = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_a : Dict = input(F"""Which version are you releasing? [{default_version}]""" )
if len(__a ) == 0:
_a : int = default_version
print(F"""Updating version to {version}.""" )
global_version_update(__a ,patch=__a )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
_a : str = get_version()
_a : int = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_a : List[Any] = current_version.base_version
# Check with the user we got that right.
_a : Union[str, Any] = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(__a ) == 0:
_a : List[str] = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(__a )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
a__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
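# Example invocations (hedged; the behavior follows the argparse handling above):
#   python utils/release.py                  # minor release: bumps to X.(Y+1).0 everywhere
#   python utils/release.py --patch          # patch release: bumps to X.Y.(Z+1), leaves examples alone
#   python utils/release.py --post_release   # back to X.(Y+1).0.dev0 after the release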
| 15 | 1 |
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, via a prime sieve."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
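# Worked example (hand-checked): phi(2)..phi(10) = 1, 2, 2, 4, 2, 6, 4, 6, 4,
# so solution(10) == 31, the number of reduced proper fractions n/d with d <= 10.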
| 15 |
def fibonacci(n: int) -> int:
    """Return the nth Fibonacci number (F(0) = 0, F(1) = 1 indexing)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_000) -> int:
    """Index of the first term in the Fibonacci sequence to contain n digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
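# Worked example (hand-checked): 144 is the first Fibonacci number with 3 digits,
# so fibonacci_digits_index(3) == 12 and solution(3) == 12.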
| 15 | 1 |
def binary_xor(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative ints as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
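# Worked example (hand-checked): 25 = 0b11001 and 32 = 0b100000,
# so binary_xor(25, 32) == "0b111001", matching bin(25 ^ 32).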
| 15 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
a__ = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
a__ = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
a__ = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __UpperCAmelCase ( __a : int ,__a : List[str] ) -> Optional[Any]:
"""simple docstring"""
return float((preds == labels).mean() )
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ,__a : List[str]="binary" ) -> Optional[int]:
"""simple docstring"""
_a : List[str] = simple_accuracy(__a ,__a )
_a : Any = float(fa_score(y_true=__a ,y_pred=__a ,average=__a ) )
return {
"accuracy": acc,
"f1": fa,
}
def __UpperCAmelCase ( __a : Optional[Any] ,__a : str ) -> List[Any]:
"""simple docstring"""
_a : Union[str, Any] = {}
for id_pred, label in zip(__a ,__a ):
_a : Optional[int] = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
_a : Optional[Any] = id_pred['''prediction''']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
_a : str = [(pred, label)]
_a , _a : Any = [], []
for question, preds_labels in question_map.items():
_a , _a : Any = zip(*__a )
_a : List[Any] = fa_score(y_true=__a ,y_pred=__a ,average='''macro''' )
fas.append(__a )
_a : List[str] = int(sum(pred == label for pred, label in preds_labels ) == len(__a ) )
ems.append(__a )
_a : List[str] = float(sum(__a ) / len(__a ) )
_a : str = sum(__a ) / len(__a )
_a : Optional[int] = float(fa_score(y_true=__a ,y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
def __lowercase ( self ) -> Any:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
def __lowercase ( self , _a , _a ) -> Optional[Any]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_a , _a )}
elif self.config_name == "cb":
return acc_and_fa(_a , _a , fa_avg='''macro''' )
elif self.config_name == "record":
_a : Any = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
_a : Any = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
return evaluate_record(_a , _a )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_a , _a )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_a , _a )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 15 | 1 |
def catalan(number: int) -> int:
    """Return the Catalan number C_(number-1) for number >= 1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
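# Worked example (hand-checked): the recurrence C *= (4*i - 2); C //= (i + 1)
# produces 1, 1, 2, 5, 14, ..., so catalan(5) == 14.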
| 15 |
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Find the largest-magnitude eigenvalue of input_matrix and its eigenvector."""
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
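# Extra usage sketch: for a diagonal matrix the dominant eigenpair is known in advance,
# so this doubles as a quick sanity check of power_iteration above.
# value, vec = power_iteration(np.diag([3.0, 1.0]), np.array([1.0, 1.0]))
# value -> ~3.0, vec -> ~[1, 0] up to sign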
| 15 | 1 |
from collections.abc import Callable


class Heap:
    """A generic heap on (item, score) pairs; a max-heap with the identity key by default."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item, item_value) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item, item_value) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


if __name__ == "__main__":
    import doctest

    doctest.testmod()
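# Usage sketch for Heap above: with the identity key and `<` in _cmp, the largest
# value sits on top (a max-heap).
# h = Heap()
# h.insert_item("a", 34)
# h.insert_item("b", 31)
# h.insert_item("c", 37)
# h.get_top()      # -> ["c", 37]
# h.extract_top()  # -> ["c", 37]; afterwards "a" is on top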
| 15 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCAmelCase__ : Optional[datasets.Features] = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCAmelCase__ : Any = PandasConfig
def __lowercase ( self ) -> Any:
return datasets.DatasetInfo(features=self.config.features )
def __lowercase ( self , _a ) -> List[Any]:
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_a : str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_a , (str, list, tuple) ):
_a : Dict = data_files
if isinstance(_a , _a ):
_a : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : int = [dl_manager.iter_files(_a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
_a : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(_a , _a ):
_a : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : Any = [dl_manager.iter_files(_a ) for file in files]
splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={'''files''': files} ) )
return splits
def __lowercase ( self , _a ) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_a : Optional[Any] = table_cast(_a , self.config.features.arrow_schema )
return pa_table
def __lowercase ( self , _a ) -> List[str]:
for i, file in enumerate(itertools.chain.from_iterable(_a ) ):
with open(_a , '''rb''' ) as f:
_a : str = pa.Table.from_pandas(pd.read_pickle(_a ) )
yield i, self._cast_table(_a )
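# Hedged usage sketch: this builder backs `load_dataset("pandas", ...)`, which reads
# pickled pandas DataFrames; the file name below is a placeholder.
# from datasets import load_dataset
# ds = load_dataset("pandas", data_files="frames.pkl", split="train")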
| 15 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
a__ = logging.get_logger(__name__)
@add_end_docstrings(
__lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self , _a ) -> np.ndarray:
if self.framework == "tf":
_a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __lowercase ( self , _a ) -> np.ndarray:
_a : int = self.get_masked_index(_a )
_a : Tuple = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def __lowercase ( self , _a ) -> Optional[int]:
if isinstance(_a , _a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_a )
def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]:
if return_tensors is None:
_a : Union[str, Any] = self.framework
_a : str = self.tokenizer(_a , return_tensors=_a )
self.ensure_exactly_one_mask_token(_a )
return model_inputs
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[str] = self.model(**_a )
_a : Any = model_inputs['''input_ids''']
return model_outputs
def __lowercase ( self , _a , _a=5 , _a=None ) -> str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
_a : List[Any] = target_ids.shape[0]
_a : Any = model_outputs['''input_ids'''][0]
_a : List[str] = model_outputs['''logits''']
if self.framework == "tf":
_a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_a : List[str] = outputs.numpy()
_a : Dict = outputs[0, masked_index, :]
_a : str = stable_softmax(_a , axis=-1 )
if target_ids is not None:
_a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) )
_a : Union[str, Any] = tf.expand_dims(_a , 0 )
_a : Optional[int] = tf.math.top_k(_a , k=_a )
_a , _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy()
else:
_a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_a : List[str] = outputs[0, masked_index, :]
_a : List[Any] = logits.softmax(dim=-1 )
if target_ids is not None:
_a : List[Any] = probs[..., target_ids]
_a , _a : Optional[Any] = probs.topk(_a )
_a : Dict = []
_a : List[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
_a : Optional[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
_a : Optional[int] = input_ids.numpy().copy()
if target_ids is not None:
_a : Tuple = target_ids[p].tolist()
_a : List[str] = p
# Filter padding out:
_a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a )
_a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_a )
result.append(_a )
if single_mask:
return result[0]
return result
def __lowercase ( self , _a , _a=None ) -> Dict:
if isinstance(_a , _a ):
_a : Tuple = [targets]
try:
_a : int = self.tokenizer.get_vocab()
except Exception:
_a : Any = {}
_a : List[Any] = []
for target in targets:
_a : List[Any] = vocab.get(_a , _a )
if id_ is None:
_a : Tuple = self.tokenizer(
_a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )['''input_ids''']
if len(_a ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
_a : Tuple = input_ids[0]
                # XXX: if users hit this path, tokenization becomes pretty slow, so
                # warn them so they can fix the input and get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
_a : List[str] = list(set(_a ) )
if len(_a ) == 0:
            raise ValueError('''At least one target must be provided when `targets` is passed.''' )
_a : int = np.array(_a )
return target_ids
def __lowercase ( self , _a=None , _a=None ) -> Tuple:
_a : str = {}
if targets is not None:
_a : List[Any] = self.get_target_ids(_a , _a )
_a : Optional[Any] = target_ids
if top_k is not None:
_a : Union[str, Any] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , _a , *_a , **_a ) -> int:
_a : Optional[Any] = super().__call__(_a , **_a )
if isinstance(_a , _a ) and len(_a ) == 1:
return outputs[0]
return outputs
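if __name__ == "__main__":
    # Illustrative usage sketch (added; not from the original file): driving a
    # fill-mask pipeline with `top_k` and `targets`, assuming the standard
    # `transformers.pipeline` factory. Model name and prompt are arbitrary placeholders.
    from transformers import pipeline

    unmasker = pipeline('''fill-mask''' , model='''distilroberta-base''' )
    # `top_k` caps the number of predictions returned for the mask position.
    print(unmasker('''Paris is the <mask> of France.''' , top_k=2 ) )
    # `targets` restricts scoring to the given tokens (note the leading space for BPE vocabularies).
    print(unmasker('''Paris is the <mask> of France.''' , targets=[''' capital'''] ) )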
| 15 |
def _modexpt(base: int , exponent: int , modulo_value: int ) -> int:
    """Compute (base ** exponent) % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value


def solution(base: int = 1_777 , height: int = 1_855 , digits: int = 8 ) -> int:
    """Return the last `digits` digits of the tetration base ↑↑ height (Project Euler 188)."""
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result


if __name__ == "__main__":
    print(f'''{solution() = }''')
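    # Illustrative cross-check (added): Python's built-in three-argument `pow`
    # performs the same modular exponentiation, so it can validate `_modexpt`
    # on sample inputs.
    assert _modexpt(3 , 5 , 7 ) == pow(3 , 5 , 7 )
    assert _modexpt(1_777 , 1_855 , 10**8 ) == pow(1_777 , 1_855 , 10**8 )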
| 15 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
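# Illustrative sketch (added; names hypothetical): `_LazyModule` defers the heavy
# torch imports above by registering a proxy module in `sys.modules` that only
# imports a submodule when one of its attributes is first accessed, roughly:
#
#     import importlib, sys, types
#
#     class _LazyProxy(types.ModuleType):
#         def __getattr__(self, name):
#             submodule = importlib.import_module('''.modeling_vit_msn''', __name__)
#             return getattr(submodule, name)
#
#     sys.modules[__name__] = _LazyProxy(__name__)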
| 15 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
a__ = '''\
'''
a__ = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
a__ = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def __lowercase ( self , _a , _a , _a = 1_6 , _a = True , _a=None ) -> List[Any]:
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
_a : List[str] = '''cuda'''
else:
_a : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
_a : Dict = AutoModelForCausalLM.from_pretrained(_a )
_a : List[Any] = model.to(_a )
_a : List[str] = AutoTokenizer.from_pretrained(_a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_a : str = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_a : List[Any] = model.config.max_length - 1
else:
_a : List[str] = model.config.max_length
_a : Union[str, Any] = tokenizer(
_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a )
_a : List[Any] = encodings['''input_ids''']
_a : int = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_a : Optional[int] = []
_a : Dict = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ):
_a : Dict = min(start_index + batch_size , len(_a ) )
_a : Union[str, Any] = encoded_texts[start_index:end_index]
_a : int = attn_masks[start_index:end_index]
if add_start_token:
_a : Dict = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a )
_a : List[str] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
_a : Dict = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(_a ), attn_mask] , dim=1 )
_a : Dict = encoded_batch
with torch.no_grad():
_a : Any = model(_a , attention_mask=_a ).logits
_a : List[str] = out_logits[..., :-1, :].contiguous()
_a : Union[str, Any] = labels[..., 1:].contiguous()
_a : Optional[int] = attn_mask[..., 1:].contiguous()
            _a : Union[str, Any] = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
| 15 | 1 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of the three color values in one pass (Dijkstra's three-way partition)."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low: int = 0
    high: int = len(sequence) - 1
    mid: int = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F"""The elements inside the sequence must contain only {colors} values"""
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('''Enter numbers separated by commas:\n''').strip()
    unsorted = [int(item.strip()) for item in user_input.split(''',''')]
    print(f'''{dutch_national_flag_sort(unsorted)}''')
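    # Illustrative check (added): the partition is a single left-to-right pass,
    # sorting any mix of the three colors in O(n) time with O(1) extra space.
    assert dutch_national_flag_sort([2, 0, 1, 2, 0]) == [0, 0, 1, 2, 2]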
| 15 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 15 | 1 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __UpperCAmelCase ( __a : int ) -> bool:
"""simple docstring"""
_a : int = int(number**0.5 )
return number == sq * sq
def __UpperCAmelCase ( __a : int ,__a : int ,__a : int ,__a : int ,__a : int ,__a : int ) -> tuple[int, int]:
"""simple docstring"""
_a : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
_a : int = x_den * y_den * z_den
_a : int = gcd(__a ,__a )
top //= hcf
bottom //= hcf
return top, bottom
def __UpperCAmelCase ( __a : int = 35 ) -> int:
"""simple docstring"""
_a : set = set()
_a : int
_a : Fraction = Fraction(0 )
_a : tuple[int, int]
for x_num in range(1 ,order + 1 ):
for x_den in range(x_num + 1 ,order + 1 ):
for y_num in range(1 ,order + 1 ):
for y_den in range(y_num + 1 ,order + 1 ):
# n=1
_a : Optional[Any] = x_num * y_den + x_den * y_num
_a : Any = x_den * y_den
_a : List[str] = gcd(__a ,__a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_a : List[Any] = add_three(
__a ,__a ,__a ,__a ,__a ,__a )
unique_s.add(__a )
# n=2
_a : Dict = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
_a : Tuple = x_den * x_den * y_den * y_den
if is_sq(__a ) and is_sq(__a ):
_a : List[str] = int(sqrt(__a ) )
_a : List[str] = int(sqrt(__a ) )
_a : List[Any] = gcd(__a ,__a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_a : List[Any] = add_three(
__a ,__a ,__a ,__a ,__a ,__a )
unique_s.add(__a )
# n=-1
_a : Optional[Any] = x_num * y_num
_a : List[Any] = x_den * y_num + x_num * y_den
_a : List[Any] = gcd(__a ,__a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_a : Any = add_three(
__a ,__a ,__a ,__a ,__a ,__a )
unique_s.add(__a )
# n=2
_a : int = x_num * x_num * y_num * y_num
_a : Tuple = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__a ) and is_sq(__a ):
_a : str = int(sqrt(__a ) )
_a : Optional[int] = int(sqrt(__a ) )
_a : Optional[int] = gcd(__a ,__a )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_a : Optional[int] = add_three(
__a ,__a ,__a ,__a ,__a ,__a )
unique_s.add(__a )
for num, den in unique_s:
total += Fraction(__a ,__a )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 15 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
a__ = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
a__ = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
a__ = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
a__ = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
a__ = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
a__ = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
a__ = ''''''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : List[str] ) -> Optional[int]:
"""simple docstring"""
assert ReadMe.from_string(__a ,__a ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def __UpperCAmelCase ( __a : List[str] ,__a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ):
_a : List[Any] = ReadMe.from_string(__a ,__a )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Dict ,__a : Dict ) -> Tuple:
"""simple docstring"""
with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ):
ReadMe.from_string(__a ,__a )
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
ReadMe.from_string(__a ,__a ,suppress_parsing_errors=__a )
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Any ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : Tuple = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : Optional[Any] = ReadMe.from_readme(__a ,__a ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def __UpperCAmelCase ( __a : List[Any] ,__a : List[Any] ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : int = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : Optional[int] = expected_error.format(path=__a )
with pytest.raises(__a ,match=re.escape(__a ) ):
_a : Any = ReadMe.from_readme(__a ,__a )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : str ,__a : Union[str, Any] ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : Optional[Any] = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : str = expected_error.format(path=__a )
with pytest.raises(__a ,match=re.escape(__a ) ):
ReadMe.from_readme(__a ,__a )
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Optional[Any] ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : int = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
ReadMe.from_readme(__a ,__a ,suppress_parsing_errors=__a )
| 15 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
a__ = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
a__ = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('''uint8''')
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode='''L''') for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
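if __name__ == "__main__":
    # Illustrative round trip (added; not from the original file): converting a
    # random model-space tensor in [-1, 1] into PIL images with the helpers above.
    # Shapes are arbitrary placeholders.
    import torch

    fake_batch = torch.rand(2, 3, 6_4, 6_4) * 2 - 1  # NCHW batch in [-1, 1]
    pil_images = pt_to_pil(fake_batch)
    print([im.size for im in pil_images])  # [(64, 64), (64, 64)]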
| 15 |
from __future__ import annotations
def __UpperCAmelCase ( __a : list ) -> float:
"""simple docstring"""
if not nums:
raise ValueError('''List is empty''' )
return sum(__a ) / len(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 1 |
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    """Raised when the same node is visited twice during iteration."""


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self) -> Any:
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """A loop exists when the exact same node appears more than once in the list."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
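# Constant-memory alternative (illustrative, not part of the original file): the
# `has_loop` property above tracks visited nodes in an O(n) list; Floyd's
# tortoise-and-hare detects a cycle with two pointers in O(1) extra space.
def has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False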
| 15 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['''small''', '''medium''', '''large''']

OLD_KEY = '''lm_head.decoder.weight'''
NEW_KEY = '''lm_head.weight'''


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Rename the LM head weight key and re-save the checkpoint in transformers format."""
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
        pytorch_dump_folder_path = f'''./DialoGPT-{MODEL}'''
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 15 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    """Register the shared diffusers test options with pytest."""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """Write the optional test reports at the end of the session."""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 15 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase_ ( enum.Enum ):
"""simple docstring"""
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : Optional[Any] = 2
@add_end_docstrings(__lowercase )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self , *_a , **_a ) -> List[str]:
super().__init__(*_a , **_a )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_a : Dict = None
if self.model.config.prefix is not None:
_a : List[Any] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_a : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_a , _a , _a : str = self._sanitize_parameters(prefix=_a , **self._forward_params )
_a : Optional[Any] = {**self._preprocess_params, **preprocess_params}
_a : List[Any] = {**self._forward_params, **forward_params}
def __lowercase ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> Optional[int]:
_a : List[Any] = {}
if prefix is not None:
_a : Optional[Any] = prefix
if prefix:
_a : Dict = self.tokenizer(
_a , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Tuple = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
_a : Dict = handle_long_generation
preprocess_params.update(_a )
_a : Tuple = generate_kwargs
_a : Any = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
_a : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
_a : Any = ReturnType.TENSORS
if return_type is not None:
_a : Any = return_type
if clean_up_tokenization_spaces is not None:
_a : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
_a : Tuple = self.tokenizer.encode(_a , add_special_tokens=_a )
if len(_a ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
_a : List[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowercase ( self , *_a , **_a ) -> Union[str, Any]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_a , **_a )
def __call__( self , _a , **_a ) -> List[str]:
return super().__call__(_a , **_a )
def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]:
_a : Optional[int] = self.tokenizer(
prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Union[str, Any] = prompt_text
if handle_long_generation == "hole":
_a : List[str] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_a : int = generate_kwargs['''max_new_tokens''']
else:
_a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_a : List[str] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
                    raise ValueError(
                        '''We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'''
                        ''' model's max length''' )
_a : List[Any] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_a : List[str] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def __lowercase ( self , _a , **_a ) -> Optional[int]:
_a : Any = model_inputs['''input_ids''']
_a : Optional[Any] = model_inputs.get('''attention_mask''' , _a )
# Allow empty prompts
if input_ids.shape[1] == 0:
_a : int = None
_a : int = None
_a : List[str] = 1
else:
_a : List[Any] = input_ids.shape[0]
_a : Union[str, Any] = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_a : int = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
_a : Tuple = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
_a : int = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_a : Dict = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_a : Optional[Any] = self.model.generate(input_ids=_a , attention_mask=_a , **_a )
_a : int = generated_sequence.shape[0]
if self.framework == "pt":
_a : Tuple = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_a : List[Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowercase ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> int:
_a : Tuple = model_outputs['''generated_sequence'''][0]
_a : int = model_outputs['''input_ids''']
_a : Any = model_outputs['''prompt_text''']
_a : Any = generated_sequence.numpy().tolist()
_a : Any = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_a : Optional[int] = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_a : str = self.tokenizer.decode(
_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_a : Union[str, Any] = 0
else:
_a : str = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) )
if return_type == ReturnType.FULL_TEXT:
_a : str = prompt_text + text[prompt_length:]
else:
_a : List[str] = text[prompt_length:]
_a : Union[str, Any] = {'''generated_text''': all_text}
records.append(_a )
return records
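if __name__ == "__main__":
    # Illustrative usage sketch (added; not from the original file): exercising the
    # call-time knobs handled by `_sanitize_parameters` above, assuming the standard
    # `transformers.pipeline` factory. Model name and prompt are arbitrary placeholders.
    from transformers import pipeline

    generator = pipeline('''text-generation''' , model='''gpt2''' )
    out = generator(
        '''Once upon a time''' ,
        max_new_tokens=2_0 ,
        handle_long_generation='''hole''' ,  # truncate the prompt from the left if it is too long
        return_full_text=False ,
    )
    print(out[0]['''generated_text'''] )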
| 15 | 1 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
a__ = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def __UpperCAmelCase ( __a : Optional[int]=True ) -> Optional[int]:
"""simple docstring"""
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__lowercase ) )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : int = None
UpperCAmelCase__ : int = None
def __lowercase ( self , _a , _a ) -> Optional[Any]:
with TemporaryDirectory() as tmp_dir:
_a : Union[str, Any] = dataset_module_factory(_a , cache_dir=_a )
_a : Tuple = import_main_class(dataset_module.module_path , dataset=_a )
_a : DatasetBuilder = builder_cls(
cache_dir=_a , config_name=_a , hash=dataset_module.hash , )
_a : Union[str, Any] = '''/'''.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=_a ).replace(os.sep , '''/''' ),
config.DATASET_INFO_FILENAME,
] )
_a : Dict = cached_path(_a , cache_dir=_a )
self.assertTrue(os.path.exists(_a ) )
@pytest.mark.integration
def __UpperCAmelCase ( __a : List[str] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
_a : str = dataset_module_factory('''wikipedia''' ,cache_dir=__a )
_a : Dict = import_main_class(dataset_module.module_path )
_a : DatasetBuilder = builder_cls(
cache_dir=__a ,config_name='''20220301.frr''' ,hash=dataset_module.hash ,)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
_a : Union[str, Any] = None
builder_instance.download_and_prepare()
_a : Optional[int] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __UpperCAmelCase ( __a : Optional[Any] ) -> Dict:
"""simple docstring"""
_a : Optional[Any] = dataset_module_factory('''wikipedia''' ,cache_dir=__a )
_a : Any = import_main_class(dataset_module.module_path ,dataset=__a )
_a : DatasetBuilder = builder_cls(
cache_dir=__a ,config_name='''20220301.frr''' ,hash=dataset_module.hash ,)
_a : Dict = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(__a ,__a )
assert "train" in ds
assert isinstance(ds['''train'''] ,__a )
assert next(iter(ds['''train'''] ) )
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( __a : Dict=None ) -> str:
"""simple docstring"""
if subparsers is not None:
_a : Union[str, Any] = subparsers.add_parser('''test''' )
else:
_a : List[str] = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' ,default=__a ,help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) ,)
if subparsers is not None:
parser.set_defaults(func=__a )
return parser
def __UpperCAmelCase ( __a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Dict = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
_a : List[Any] = script_name
else:
_a : Union[str, Any] = F"""--config_file={args.config_file} {script_name}"""
_a : str = ['''accelerate-launch'''] + test_args.split()
_a : str = execute_subprocess_async(__a ,env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = test_command_parser()
_a : List[Any] = parser.parse_args()
test_command(__a )
if __name__ == "__main__":
main()
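# Illustrative CLI usage (added): the command assembled in `test_command` is what
# the accelerate entry point runs, e.g.
#
#     accelerate test --config_file path/to/default_config.yaml
#
# which launches test_script.py through `accelerate-launch` and prints the success
# message when the subprocess exits with return code 0.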
| 15 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = '''▁'''
a__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
a__ = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
a__ = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1024,
}
# fmt: off
a__ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Any = ["input_ids", "attention_mask"]
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self , _a , _a=None , _a=None , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a = None , **_a , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
_a : int = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
_a : int = {} if sp_model_kwargs is None else sp_model_kwargs
_a : List[Any] = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_a , tgt_lang=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
_a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
_a : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
_a : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_a : Any = 1
_a : int = len(self.sp_model )
_a : Optional[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_a )
}
_a : str = {v: k for k, v in self.lang_code_to_id.items()}
_a : str = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_a : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_a : Dict = src_lang if src_lang is not None else '''en_XX'''
_a : Optional[Any] = self.lang_code_to_id[self._src_lang]
_a : List[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __lowercase ( self ) -> int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __lowercase ( self ) -> str:
return self._src_lang
@src_lang.setter
def __lowercase ( self , _a ) -> None:
_a : int = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Dict:
_a : Tuple = self.__dict__.copy()
_a : Optional[Any] = None
return state
def __setstate__( self , _a ) -> None:
_a : List[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a : Dict = {}
_a : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowercase ( self ) -> Dict:
_a : Dict = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self , _a ) -> List[str]:
return self.sp_model.encode(_a , out_type=_a )
def __lowercase ( self , _a ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_a : List[Any] = self.sp_model.PieceToId(_a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowercase ( self , _a ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowercase ( self , _a ) -> Dict:
_a : Any = []
_a : Optional[int] = ''''''
_a : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
_a : int = True
_a : List[Any] = []
else:
current_sub_tokens.append(_a )
_a : Tuple = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __lowercase ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Dict = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
_a : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def __lowercase ( self , _a , _a = None , _a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
_a : List[Any] = [1] * len(self.prefix_tokens )
_a : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_a )) + suffix_ones
return prefix_ones + ([0] * len(_a )) + ([0] * len(_a )) + suffix_ones
def __lowercase ( self , _a , _a = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowercase ( self , _a , _a , _a , _a , **_a ) -> Optional[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_a : Optional[Any] = src_lang
_a : Dict = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
_a : Optional[Any] = self.convert_tokens_to_ids(_a )
_a : Union[str, Any] = tgt_lang_id
return inputs
def __lowercase ( self , _a , _a = "en_XX" , _a = None , _a = "ro_RO" , **_a , ) -> BatchEncoding:
_a : Tuple = src_lang
_a : Dict = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def __lowercase ( self ) -> int:
return self.set_src_lang_special_tokens(self.src_lang )
def __lowercase ( self ) -> Optional[int]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowercase ( self , _a ) -> None:
_a : int = self.lang_code_to_id[src_lang]
_a : List[str] = [self.cur_lang_code_id]
_a : List[str] = [self.eos_token_id]
def __lowercase ( self , _a ) -> None:
_a : List[str] = self.lang_code_to_id[tgt_lang]
_a : str = [self.cur_lang_code_id]
_a : Any = [self.eos_token_id]
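if __name__ == "__main__":
    # Illustrative usage sketch (added; not from the original file): preparing a
    # translation batch with explicit source/target languages, assuming the Hub
    # checkpoint named in this file's pretrained map is available.
    from transformers import MBart50Tokenizer

    tokenizer = MBart50Tokenizer.from_pretrained(
        '''facebook/mbart-large-50-one-to-many-mmt''' , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
    batch = tokenizer('''UN Chief says there is no military solution in Syria''' , return_tensors='''pt''' )
    print(batch['''input_ids'''][0][:5] )  # starts with the en_XX language-code id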
| 15 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[Any] = tempfile.mkdtemp()
# fmt: off
_a : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_a : Any = {
'''do_resize''': True,
'''size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_a : str = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __lowercase ( self , **_a ) -> Any:
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , **_a ) -> str:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Any:
        _a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
_a : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self ) -> str:
_a : List[str] = self.get_tokenizer()
_a : Tuple = self.get_image_processor()
_a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Dict:
_a : List[str] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a : List[Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Any:
_a : Dict = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : int = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[str] = self.prepare_image_inputs()
_a : List[Any] = image_processor(_a , return_tensors='''np''' )
_a : Dict = processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = self.get_tokenizer()
_a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Tuple = '''lower newer'''
_a : int = processor(text=_a )
_a : str = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self ) -> List[Any]:
_a : Any = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : Tuple = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[Any] = '''lower newer'''
_a : Union[str, Any] = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_a ):
processor()
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Any = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : int = processor.batch_decode(_a )
_a : int = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __lowercase ( self ) -> List[Any]:
_a : Tuple = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Optional[int] = '''lower newer'''
_a : Dict = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
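# Hedged usage sketch (an addition): the round trip the tests above exercise,
# with a real tokenizer checkpoint as an illustrative placeholder and `image`
# a PIL image such as those from prepare_image_inputs().
#
#     processor = VisionTextDualEncoderProcessor(
#         tokenizer=BertTokenizerFast.from_pretrained("bert-base-uncased"),
#         image_processor=ViTImageProcessor(),
#     )
#     processor.save_pretrained("./vtde-processor")
#     processor = VisionTextDualEncoderProcessor.from_pretrained("./vtde-processor")
#     inputs = processor(text="lower newer", images=image)
#     # -> input_ids, token_type_ids, attention_mask, pixel_values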
| 15 | 1 |
def __UpperCAmelCase ( input_a : int ,input_b : int ) -> int:
"""simple docstring"""
return 1 if input_a == input_b else 0
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
assert xnor_gate(0 ,0 ) == 1
assert xnor_gate(0 ,1 ) == 0
assert xnor_gate(1 ,0 ) == 0
assert xnor_gate(1 ,1 ) == 1
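# Illustrative addition: XNOR is the negation of XOR, so the gate above can
# be cross-checked against a local XOR stand-in (`xor_gate` is defined here
# for the check only; it is not part of the original module).
def xor_gate(input_a: int, input_b: int) -> int:
    return 1 if input_a != input_b else 0

assert all(xnor_gate(a, b) == 1 - xor_gate(a, b) for a in (0, 1) for b in (0, 1))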
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 15 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
a__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : Optional[int] ,__a : List[str] ,__a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for attribute in key.split('''.''' ):
_a : Optional[Any] = getattr(__a ,__a )
if weight_type is not None:
_a : Dict = getattr(__a ,__a ).shape
else:
_a : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_a : List[Any] = value
elif weight_type == "weight_g":
_a : Any = value
elif weight_type == "weight_v":
_a : Union[str, Any] = value
elif weight_type == "bias":
_a : Optional[int] = value
else:
_a : List[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __UpperCAmelCase ( __a : Any ,__a : Union[str, Any] ,__a : Union[str, Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = []
_a : Union[str, Any] = fairseq_model.state_dict()
_a : Union[str, Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_a : int = False
if "conv_layers" in name:
load_conv_layer(
__a ,__a ,__a ,__a ,hf_model.config.feat_extract_norm == '''group''' ,)
_a : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
_a : Union[str, Any] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
_a : Any = True
if "*" in mapped_key:
_a : Optional[int] = name.split(__a )[0].split('''.''' )[-2]
_a : Any = mapped_key.replace('''*''' ,__a )
if "weight_g" in name:
_a : List[Any] = '''weight_g'''
elif "weight_v" in name:
_a : List[str] = '''weight_v'''
elif "weight" in name:
_a : Any = '''weight'''
elif "bias" in name:
_a : str = '''bias'''
else:
_a : Any = None
set_recursively(__a ,__a ,__a ,__a ,__a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __UpperCAmelCase ( __a : int ,__a : Optional[Any] ,__a : Dict ,__a : List[str] ,__a : Any ) -> Tuple:
"""simple docstring"""
_a : int = full_name.split('''conv_layers.''' )[-1]
_a : Any = name.split('''.''' )
_a : List[Any] = int(items[0] )
_a : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_a : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_a : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
)
_a : int = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
)
_a : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def __UpperCAmelCase ( __a : Dict ,__a : List[Any] ,__a : List[str]=None ,__a : Optional[int]=None ,__a : int=True ) -> List[Any]:
"""simple docstring"""
if config_path is not None:
_a : Tuple = HubertConfig.from_pretrained(__a )
else:
_a : Any = HubertConfig()
if is_finetuned:
if dict_path:
_a : Tuple = Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_a : Any = target_dict.pad_index
_a : Tuple = target_dict.bos_index
_a : Optional[int] = target_dict.eos_index
_a : Optional[Any] = len(target_dict.symbols )
_a : Tuple = os.path.join(__a ,'''vocab.json''' )
if not os.path.isdir(__a ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__a ) )
return
os.makedirs(__a ,exist_ok=__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices ,__a )
_a : Tuple = WavaVecaCTCTokenizer(
__a ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=__a ,)
_a : Tuple = True if config.feat_extract_norm == '''layer''' else False
_a : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__a ,return_attention_mask=__a ,)
_a : List[Any] = WavaVecaProcessor(feature_extractor=__a ,tokenizer=__a )
processor.save_pretrained(__a )
_a : Tuple = HubertForCTC(__a )
else:
_a : Tuple = HubertModel(__a )
if is_finetuned:
_a , _a , _a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_a , _a , _a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_a : Any = model[0].eval()
recursively_load_weights(__a ,__a ,__a )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
a__ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
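# Hedged invocation sketch (an addition) using only the flags defined above;
# the script name and all paths are placeholders:
#
#     python convert_hubert_checkpoint.py \
#         --checkpoint_path /path/to/hubert_base_ls960.pt \
#         --pytorch_dump_folder_path ./hubert-converted \
#         --dict_path /path/to/dict.ltr.txt \
#         --config_path ./config.json
#
# Pass --not_finetuned instead of --dict_path when converting a pretraining
# checkpoint without a CTC head.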
| 15 | 1 |
import datasets
from .evaluate import evaluate
a__ = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
a__ = '''
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
a__ = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
def __lowercase ( self , _a , _a ) -> Optional[int]:
_a : Optional[int] = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
_a : Dict = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
_a : Union[str, Any] = evaluate(dataset=_a , predictions=_a )
return score
| 15 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = ["image_processor", "tokenizer"]
UpperCAmelCase__ : str = "ViltImageProcessor"
UpperCAmelCase__ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _a=None , _a=None , **_a ) -> Any:
_a : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
_a : Dict = kwargs.pop('''feature_extractor''' )
_a : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
_a : int = self.image_processor
def __call__( self , _a , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
_a : Tuple = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel_values + pixel_mask
_a : str = self.image_processor(_a , return_tensors=_a )
encoding.update(_a )
return encoding
def __lowercase ( self , *_a , **_a ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_a , **_a )
def __lowercase ( self , *_a , **_a ) -> str:
return self.tokenizer.decode(*_a , **_a )
@property
def __lowercase ( self ) -> Optional[int]:
_a : str = self.tokenizer.model_input_names
_a : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowercase ( self ) -> Optional[Any]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __lowercase ( self ) -> Any:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
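# Hedged usage sketch (an addition; the class in this dump is anonymized —
# in transformers this combined tokenizer/image-processor is the ViLT
# processor, and the checkpoint name is illustrative):
#
#     processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#     encoding = processor(image, "How many cats are there?", return_tensors="pt")
#     # tokenizer output (input_ids, attention_mask, ...) updated with the
#     # image processor's pixel_values and pixel_mask, per __call__ above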
| 15 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a__ = logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BICUBIC , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , **_a , ) -> None:
super().__init__(**_a )
_a : List[Any] = size if size is not None else {'''height''': 3_8_4, '''width''': 3_8_4}
_a : List[str] = get_size_dict(_a , default_to_square=_a )
_a : Optional[int] = do_resize
_a : str = size
_a : Dict = resample
_a : Any = do_rescale
_a : int = rescale_factor
_a : Tuple = do_normalize
_a : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_a : str = image_std if image_std is not None else OPENAI_CLIP_STD
_a : List[Any] = do_convert_rgb
def __lowercase ( self , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray:
_a : str = get_size_dict(_a , default_to_square=_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
_a : int = (size['''height'''], size['''width'''])
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> int:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_a : str = do_resize if do_resize is not None else self.do_resize
_a : List[Any] = resample if resample is not None else self.resample
_a : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
_a : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
_a : Optional[Any] = image_mean if image_mean is not None else self.image_mean
_a : Union[str, Any] = image_std if image_std is not None else self.image_std
_a : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_a : Dict = size if size is not None else self.size
_a : List[str] = get_size_dict(_a , default_to_square=_a )
_a : Any = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_a : str = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
_a : Any = [to_numpy_array(_a ) for image in images]
if do_resize:
_a : Union[str, Any] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
_a : int = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_a : int = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_a : int = [to_channel_dimension_format(_a , _a ) for image in images]
_a : Dict = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a )
return encoded_outputs
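# Hedged usage sketch (an addition; the class in this dump is anonymized —
# the preprocessing matches a BLIP-style image processor in transformers):
#
#     ip = <image processor above>(size={"height": 384, "width": 384})
#     out = ip.preprocess(pil_image, return_tensors="np")
#     out["pixel_values"].shape  # (1, 3, 384, 384): resized, rescaled to
#     # [0, 1], normalized with the OPENAI_CLIP mean/std, channels first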
| 15 |
from math import ceil
def __UpperCAmelCase ( __a : int = 1_001 ) -> int:
"""simple docstring"""
_a : Dict = 1
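# Each spiral ring i has odd side length 2*i + 1 and four corner values:
# (2i+1)^2, (2i+1)^2 - 2i, (2i+1)^2 - 4i and (2i+1)^2 - 6i, which sum to
# 4*(2i+1)**2 - 12*i, i.e. the `4 * odd**2 - 6 * even` added per iteration.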
for i in range(1 ,int(ceil(n / 2.0 ) ) ):
_a : int = 2 * i + 1
_a : str = 2 * i
_a : Any = total + 4 * odd**2 - 6 * even
return total
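# Optional sanity check (an addition): accumulate the four corner values of
# each ring explicitly and compare with the closed form used above.
def _spiral_diagonal_sum_bruteforce(n: int) -> int:
    total = 1
    for side in range(3, n + 1, 2):
        total += sum(side * side - k * (side - 1) for k in range(4))
    return total

assert _spiral_diagonal_sum_bruteforce(5) == 101  # 1+3+5+7+9+13+17+21+25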
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
a__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 15 | 1 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def __UpperCAmelCase ( __a : int = 3 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
if isinstance(__a , str ):
raise TypeError('''number of qubits must be an integer.''' )
if number_of_qubits <= 0:
raise ValueError('''number of qubits must be > 0.''' )
if math.floor(__a ) != number_of_qubits:
raise ValueError('''number of qubits must be an exact integer.''' )
if number_of_qubits > 10:
raise ValueError('''number of qubits too large to simulate (>10).''' )
_a : List[str] = QuantumRegister(__a ,'''qr''' )
_a : List[str] = ClassicalRegister(__a ,'''cr''' )
_a : str = QuantumCircuit(__a ,__a )
_a : Optional[int] = number_of_qubits
for i in range(__a ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(__a ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) ,__a ,__a )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(__a ,number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(__a ,__a )
# simulate with 10000 shots
_a : Dict = Aer.get_backend('''qasm_simulator''' )
_a : Union[str, Any] = execute(__a ,__a ,shots=10_000 )
return job.result().get_counts(__a )
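# Note (an addition): starting from |00...0>, the QFT above produces a
# uniform superposition, so the 10_000-shot counts should be spread roughly
# evenly across all 2**number_of_qubits bitstrings (~1250 each for 3 qubits).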
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 15 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : str ,__a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def __UpperCAmelCase ( __a : np.ndarray ,__a : Optional[str] ,__a : Optional[str] ) -> List[Any]:
"""simple docstring"""
_a : str = to_pil_image(__a )
_a , _a : Optional[Any] = pil_image.size
_a : Tuple = pytesseract.image_to_data(__a ,lang=__a ,output_type='''dict''' ,config=__a )
_a , _a , _a , _a , _a : List[str] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
_a : Dict = [idx for idx, word in enumerate(__a ) if not word.strip()]
_a : str = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices]
_a : List[str] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : str = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_a : int = []
for x, y, w, h in zip(__a ,__a ,__a ,__a ):
_a : List[str] = [x, y, x + w, y + h]
actual_boxes.append(__a )
# finally, normalize the bounding boxes
_a : Dict = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(__a ,__a ,__a ) )
assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None:
super().__init__(**_a )
_a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_a : Union[str, Any] = get_size_dict(_a )
_a : int = do_resize
_a : Optional[int] = size
_a : str = resample
_a : str = do_rescale
_a : Any = rescale_value
_a : Optional[Any] = do_normalize
_a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
_a : List[Any] = apply_ocr
_a : Optional[int] = ocr_lang
_a : Tuple = tesseract_config
def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray:
_a : Any = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_a : Optional[int] = (size['''height'''], size['''width'''])
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_a : Optional[int] = do_resize if do_resize is not None else self.do_resize
_a : Union[str, Any] = size if size is not None else self.size
_a : Any = get_size_dict(_a )
_a : List[str] = resample if resample is not None else self.resample
_a : int = do_rescale if do_rescale is not None else self.do_rescale
_a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : int = do_normalize if do_normalize is not None else self.do_normalize
_a : str = image_mean if image_mean is not None else self.image_mean
_a : Tuple = image_std if image_std is not None else self.image_std
_a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
_a : int = ocr_lang if ocr_lang is not None else self.ocr_lang
_a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
_a : List[Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
_a : Any = [to_numpy_array(_a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
_a : str = []
_a : str = []
for image in images:
_a , _a : Union[str, Any] = apply_tesseract(_a , _a , _a )
words_batch.append(_a )
boxes_batch.append(_a )
if do_resize:
_a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
_a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images]
_a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a )
if apply_ocr:
_a : Optional[int] = words_batch
_a : List[Any] = boxes_batch
return data
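# Hedged usage sketch (an addition; requires pytesseract plus the Tesseract
# binary on PATH; `document_image` is a placeholder):
#
#     ip = <image processor above>(apply_ocr=True)
#     out = ip.preprocess(document_image, return_tensors="np")
#     out["pixel_values"], out["words"], out["boxes"]
#     # boxes come back normalized to the 0-1000 grid via normalize_box()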
| 15 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a__ = {
'''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''],
'''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ['''VisionTextDualEncoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ['''FlaxVisionTextDualEncoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ['''TFVisionTextDualEncoderModel''']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : int = ArgumentParser('''Accelerate CLI tool''' ,usage='''accelerate <command> [<args>]''' ,allow_abbrev=__a )
_a : Optional[int] = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=__a )
env_command_parser(subparsers=__a )
launch_command_parser(subparsers=__a )
tpu_command_parser(subparsers=__a )
test_command_parser(subparsers=__a )
# Let's go
_a : Dict = parser.parse_args()
if not hasattr(__a ,'''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(__a )
if __name__ == "__main__":
main()
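# Hedged usage note (an addition): the parsers registered above back the
# `accelerate config`, `accelerate env`, `accelerate launch`, `accelerate tpu`
# and `accelerate test` subcommands of the CLI entry point.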
| 15 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "mvp"
UpperCAmelCase__ : Optional[int] = ["past_key_values"]
UpperCAmelCase__ : str = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , _a=5_0_2_6_7 , _a=1_0_2_4 , _a=1_2 , _a=4_0_9_6 , _a=1_6 , _a=1_2 , _a=4_0_9_6 , _a=1_6 , _a=0.0 , _a=0.0 , _a="gelu" , _a=1_0_2_4 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.02 , _a=0.0 , _a=False , _a=True , _a=1 , _a=0 , _a=2 , _a=True , _a=2 , _a=2 , _a=False , _a=1_0_0 , _a=8_0_0 , **_a , ) -> List[str]:
_a : Any = vocab_size
_a : Optional[int] = max_position_embeddings
_a : str = d_model
_a : Union[str, Any] = encoder_ffn_dim
_a : Tuple = encoder_layers
_a : str = encoder_attention_heads
_a : List[str] = decoder_ffn_dim
_a : List[Any] = decoder_layers
_a : Union[str, Any] = decoder_attention_heads
_a : List[str] = dropout
_a : Any = attention_dropout
_a : str = activation_dropout
_a : List[str] = activation_function
_a : Dict = init_std
_a : List[Any] = encoder_layerdrop
_a : List[str] = decoder_layerdrop
_a : List[Any] = classifier_dropout
_a : int = use_cache
_a : Optional[int] = encoder_layers
_a : Any = scale_embedding # scale factor will be sqrt(d_model) if True
_a : List[str] = use_prompt
_a : Optional[Any] = prompt_length
_a : int = prompt_mid_dim
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , **_a , )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , _a ):
_a : List[str] = self.bos_token_id
warnings.warn(
F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'''The config can simply be saved and uploaded again to be fixed.''' )
| 15 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a__ = random.Random()
def __UpperCAmelCase ( __a : Tuple ,__a : str=1.0 ,__a : Optional[int]=None ,__a : List[Any]=None ) -> Any:
"""simple docstring"""
if rng is None:
_a : Dict = global_rng
_a : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _a , _a=7 , _a=4_0_0 , _a=2_0_0_0 , _a=2_0_4_8 , _a=1_2_8 , _a=1 , _a=5_1_2 , _a=3_0 , _a=4_4_1_0_0 , ) -> List[Any]:
_a : Optional[Any] = parent
_a : str = batch_size
_a : List[str] = min_seq_length
_a : str = max_seq_length
_a : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a : List[Any] = spectrogram_length
_a : List[str] = feature_size
_a : List[Any] = num_audio_channels
_a : Tuple = hop_length
_a : Optional[int] = chunk_length
_a : int = sampling_rate
def __lowercase ( self ) -> Union[str, Any]:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __lowercase ( self , _a=False , _a=False ) -> List[Any]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_a : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a : List[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a : str = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = TvltFeatureExtractor
def __lowercase ( self ) -> Dict:
_a : List[str] = TvltFeatureExtractionTester(self )
def __lowercase ( self ) -> Any:
_a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_a , '''feature_size''' ) )
self.assertTrue(hasattr(_a , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_a , '''hop_length''' ) )
self.assertTrue(hasattr(_a , '''chunk_length''' ) )
self.assertTrue(hasattr(_a , '''sampling_rate''' ) )
def __lowercase ( self ) -> Optional[int]:
_a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_a : Dict = self.feature_extraction_class.from_pretrained(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Union[str, Any] = feat_extract_second.to_dict()
_a : Any = dict_first.pop('''mel_filters''' )
_a : int = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Optional[int]:
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = os.path.join(_a , '''feat_extract.json''' )
feat_extract_first.to_json_file(_a )
_a : List[str] = self.feature_extraction_class.from_json_file(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Dict = feat_extract_second.to_dict()
_a : str = dict_first.pop('''mel_filters''' )
_a : str = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
# Initialize feature_extractor
_a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_a : Union[str, Any] = feature_extractor(
_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_a : int = np.asarray(_a )
_a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __lowercase ( self ) -> int:
_a : Union[str, Any] = self._load_datasamples(1 )
_a : int = TvltFeatureExtractor()
_a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
_a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 15 | 1 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
a__ = logging.getLogger(__name__)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
_a : str = False
def __lowercase ( self , _a , _a , _a , _a ) -> int:
if not self.initialized:
_a : Optional[Any] = RagRetriever(
_a , question_encoder_tokenizer=_a , generator_tokenizer=_a , index=_a , init_retrieval=_a , )
_a : Optional[int] = True
def __lowercase ( self ) -> str:
self.retriever.index.init_index()
def __lowercase ( self , _a , _a ) -> Any:
_a , _a : str = self.retriever._main_retrieve(_a , _a )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , _a , _a , _a , _a , _a=None ) -> Optional[int]:
if index is not None and index.is_initialized() and len(_a ) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )
super().__init__(
_a , question_encoder_tokenizer=_a , generator_tokenizer=_a , index=_a , init_retrieval=_a , )
_a : List[Any] = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(_a , _a , _a , _a )
for worker in self.retrieval_workers
] )
def __lowercase ( self ) -> str:
logger.info('''initializing retrieval''' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __lowercase ( self , _a , _a ) -> int:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
_a : Optional[int] = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
_a , _a : Optional[int] = ray.get(random_worker.retrieve.remote(_a , _a ) )
else:
_a , _a : List[Any] = self._main_retrieve(_a , _a )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_a )
@classmethod
def __lowercase ( cls , _a , _a=None , **_a ) -> Optional[int]:
return super(_a , cls ).get_tokenizers(_a , _a , **_a )
@classmethod
def __lowercase ( cls , _a , _a , _a=None , **_a ) -> List[str]:
_a : str = kwargs.pop('''config''' , _a ) or RagConfig.from_pretrained(_a , **_a )
_a : str = RagTokenizer.from_pretrained(_a , config=_a )
_a : int = rag_tokenizer.question_encoder
_a : Union[str, Any] = rag_tokenizer.generator
if indexed_dataset is not None:
_a : List[Any] = '''custom'''
_a : Any = CustomHFIndex(config.retrieval_vector_size , _a )
else:
_a : str = cls._build_index(_a )
return cls(
_a , question_encoder_tokenizer=_a , generator_tokenizer=_a , retrieval_workers=_a , index=_a , )
| 15 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
a__ = logging.get_logger(__name__)
@add_end_docstrings(
__lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self , _a ) -> np.ndarray:
if self.framework == "tf":
_a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __lowercase ( self , _a ) -> np.ndarray:
_a : int = self.get_masked_index(_a )
_a : Tuple = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def __lowercase ( self , _a ) -> Optional[int]:
if isinstance(_a , _a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_a )
def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]:
if return_tensors is None:
_a : Union[str, Any] = self.framework
_a : str = self.tokenizer(_a , return_tensors=_a )
self.ensure_exactly_one_mask_token(_a )
return model_inputs
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[str] = self.model(**_a )
_a : Any = model_inputs['''input_ids''']
return model_outputs
def __lowercase ( self , _a , _a=5 , _a=None ) -> str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
_a : List[Any] = target_ids.shape[0]
_a : Any = model_outputs['''input_ids'''][0]
_a : List[str] = model_outputs['''logits''']
if self.framework == "tf":
_a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_a : List[str] = outputs.numpy()
_a : Dict = outputs[0, masked_index, :]
_a : str = stable_softmax(_a , axis=-1 )
if target_ids is not None:
_a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) )
_a : Union[str, Any] = tf.expand_dims(_a , 0 )
_a : Optional[int] = tf.math.top_k(_a , k=_a )
_a , _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy()
else:
_a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_a : List[str] = outputs[0, masked_index, :]
_a : List[Any] = logits.softmax(dim=-1 )
if target_ids is not None:
_a : List[Any] = probs[..., target_ids]
_a , _a : Optional[Any] = probs.topk(_a )
_a : Dict = []
_a : List[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
_a : Optional[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
_a : Optional[int] = input_ids.numpy().copy()
if target_ids is not None:
_a : Tuple = target_ids[p].tolist()
_a : List[str] = p
# Filter padding out:
_a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a )
_a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_a )
result.append(_a )
if single_mask:
return result[0]
return result
def __lowercase ( self , _a , _a=None ) -> Dict:
if isinstance(_a , _a ):
_a : Tuple = [targets]
try:
_a : int = self.tokenizer.get_vocab()
except Exception:
_a : Any = {}
_a : List[Any] = []
for target in targets:
_a : List[Any] = vocab.get(_a , _a )
if id_ is None:
_a : Tuple = self.tokenizer(
_a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )['''input_ids''']
if len(_a ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
_a : Tuple = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
_a : List[str] = list(set(_a ) )
if len(_a ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
_a : int = np.array(_a )
return target_ids
def __lowercase ( self , _a=None , _a=None ) -> Tuple:
_a : str = {}
if targets is not None:
_a : List[Any] = self.get_target_ids(_a , _a )
_a : Optional[Any] = target_ids
if top_k is not None:
_a : Union[str, Any] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , _a , *_a , **_a ) -> int:
_a : Optional[Any] = super().__call__(_a , **_a )
if isinstance(_a , _a ) and len(_a ) == 1:
return outputs[0]
return outputs
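# Hedged usage sketch (an addition): this class backs the standard
# transformers fill-mask pipeline; the model id is illustrative.
#
#     from transformers import pipeline
#     unmasker = pipeline("fill-mask", model="bert-base-uncased")
#     unmasker("Paris is the [MASK] of France.", top_k=3, targets=["capital"])
#     # -> list of {"score", "token", "token_str", "sequence"} dicts,
#     #    per the postprocess method above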
| 15 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
a__ = logging.get_logger(__name__)
a__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
a__ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
a__ = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : str = ["input_ids", "attention_mask"]
UpperCAmelCase__ : Union[str, Any] = BartTokenizer
def __init__( self , _a=None , _a=None , _a=None , _a="replace" , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=False , _a=True , **_a , ) -> List[str]:
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_a : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_a : Tuple = getattr(_a , pre_tok_state.pop('''type''' ) )
_a : int = add_prefix_space
_a : Optional[Any] = pre_tok_class(**_a )
_a : Dict = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_a : Optional[int] = '''post_processor'''
_a : Optional[int] = getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_a : List[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_a : List[str] = tuple(state['''sep'''] )
if "cls" in state:
_a : Tuple = tuple(state['''cls'''] )
_a : int = False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_a : Any = add_prefix_space
_a : Optional[Any] = True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
_a : Tuple = trim_offsets
_a : str = True
if changes_to_apply:
_a : Dict = getattr(_a , state.pop('''type''' ) )
_a : Tuple = component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
def __lowercase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowercase ( self , _a ) -> str:
_a : List[str] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
_a : List[Any] = value
def __lowercase ( self , *_a , **_a ) -> BatchEncoding:
_a : Optional[Any] = kwargs.get('''is_split_into_words''' , _a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_a , **_a )
def __lowercase ( self , *_a , **_a ) -> BatchEncoding:
_a : List[Any] = kwargs.get('''is_split_into_words''' , _a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_a , **_a )
def __lowercase ( self , _a , _a = None ) -> Tuple[str]:
_a : Optional[int] = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __lowercase ( self , _a , _a=None ) -> Any:
_a : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowercase ( self , _a , _a = None ) -> List[int]:
_a : Tuple = [self.sep_token_id]
_a : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
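# A hedged usage sketch, separate from the class above: BART-style fast
# tokenizers lay special tokens out as <s> A </s> for one sequence and
# <s> A </s></s> B </s> for a pair. The ids below (bos=0, eos=2) are the
# conventional BART values but are assumptions here, not read from a vocab.
def bart_style_special_tokens(token_ids_a, token_ids_b=None, bos=0, eos=2):
    output = [bos] + token_ids_a + [eos]
    if token_ids_b is None:
        return output
    return output + [eos] + token_ids_b + [eos]
assert bart_style_special_tokens([10, 11]) == [0, 10, 11, 2]
assert bart_style_special_tokens([10], [20]) == [0, 10, 2, 2, 20, 2]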
| 15 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
a__ = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
a__ = logging.getLogger()
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_a : Any = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_a : Dict = parser.parse_args()
return args.f
def __UpperCAmelCase ( __a : Optional[int] ,__a : List[str]="eval" ) -> Any:
"""simple docstring"""
_a : Any = os.path.join(__a ,F"""{split}_results.json""" )
if os.path.exists(__a ):
with open(__a ,'''r''' ) as f:
return json.load(__a )
raise ValueError(F"""can't find {path}""" )
a__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
_a : Any = self.get_auto_remove_tmp_dir()
_a : Optional[Any] = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_glue.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __lowercase ( self ) -> Dict:
_a : Tuple = self.get_auto_remove_tmp_dir()
_a : Tuple = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_clm_flax.main()
_a : List[str] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 1_0_0 )
@slow
def __lowercase ( self ) -> Optional[int]:
_a : str = self.get_auto_remove_tmp_dir()
_a : Optional[int] = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(_a , '''argv''' , _a ):
run_summarization_flax.main()
_a : Optional[int] = get_results(_a , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __lowercase ( self ) -> Tuple:
_a : List[str] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 4_2 )
@slow
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = self.get_auto_remove_tmp_dir()
_a : int = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_ta_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __lowercase ( self ) -> Optional[Any]:
        # with so little data, distributed training needs more epochs to reach a score on par with 0/1 GPU
_a : Any = 7 if get_gpu_count() > 1 else 2
_a : List[Any] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_ner.main()
_a : Dict = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __lowercase ( self ) -> Any:
_a : Optional[int] = self.get_auto_remove_tmp_dir()
_a : Union[str, Any] = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_qa.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
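# Minimal, self-contained sketch of the "<split>_results.json" round trip the
# tests above depend on: each example script dumps its metrics into output_dir
# and get_results reads them back. The metric name and value are illustrative.
import tempfile
with tempfile.TemporaryDirectory() as sketch_dir:
    with open(os.path.join(sketch_dir, "eval_results.json"), "w") as f:
        json.dump({"eval_accuracy": 0.8}, f)
    with open(os.path.join(sketch_dir, "eval_results.json"), "r") as f:
        assert json.load(f)["eval_accuracy"] >= 0.75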
| 15 | 1 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=9_9 , _a=3_2 , _a=5 , _a=4 , _a=3_7 , _a="gelu" , _a=0.1 , _a=0.1 , _a=5_1_2 , _a=1_6 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> Optional[Any]:
_a : str = parent
_a : Optional[Any] = batch_size
_a : Tuple = seq_length
_a : Union[str, Any] = is_training
_a : Optional[Any] = use_input_mask
_a : List[Any] = use_token_type_ids
_a : Any = use_labels
_a : Tuple = vocab_size
_a : Tuple = hidden_size
_a : Optional[int] = num_hidden_layers
_a : List[Any] = num_attention_heads
_a : List[Any] = intermediate_size
_a : int = hidden_act
_a : List[Any] = hidden_dropout_prob
_a : str = attention_probs_dropout_prob
_a : int = max_position_embeddings
_a : List[Any] = type_vocab_size
_a : int = type_sequence_label_size
_a : str = initializer_range
_a : Dict = num_labels
_a : Union[str, Any] = num_choices
_a : Any = scope
def __lowercase ( self ) -> str:
_a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Any = None
if self.use_input_mask:
_a : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_a : int = None
if self.use_token_type_ids:
_a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a : Tuple = None
_a : Union[str, Any] = None
_a : Any = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a : Dict = ids_tensor([self.batch_size] , self.num_choices )
_a : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self ) -> str:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]:
_a : Union[str, Any] = BioGptModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[Any] = model(_a , attention_mask=_a )
_a : List[Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ) -> Tuple:
_a : int = BioGptForCausalLM(config=_a )
model.to(_a )
model.eval()
_a : Dict = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self , _a , _a , _a , _a , _a , *_a ) -> Union[str, Any]:
_a : List[Any] = BioGptModel(config=_a )
model.to(_a )
model.eval()
# create attention mask
_a : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=_a )
_a : List[Any] = self.seq_length // 2
_a : Union[str, Any] = 0
# first forward pass
_a , _a : Optional[Any] = model(_a , attention_mask=_a ).to_tuple()
        # create a hypothetical next token and extend next_input_ids with it
_a : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
_a : Tuple = ids_tensor((1,) , _a ).item() + 1
_a : List[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
_a : Dict = random_other_next_tokens
# append to next input_ids and attn_mask
_a : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
_a : List[str] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_a )] , dim=1 , )
# get two different outputs
_a : Optional[int] = model(_a , attention_mask=_a )['''last_hidden_state''']
_a : Tuple = model(_a , past_key_values=_a , attention_mask=_a )['''last_hidden_state''']
# select random slice
_a : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a : Any = output_from_no_past[:, -1, random_slice_idx].detach()
_a : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) )
def __lowercase ( self , _a , _a , _a , _a , _a , *_a ) -> Dict:
_a : Optional[Any] = BioGptModel(config=_a ).to(_a ).eval()
_a : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=_a )
# first forward pass
_a : Union[str, Any] = model(_a , attention_mask=_a , use_cache=_a )
_a , _a : str = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids with them
_a : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a : Optional[Any] = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention_mask
_a : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
_a : int = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
_a : Optional[Any] = model(_a , attention_mask=_a )['''last_hidden_state''']
_a : int = model(_a , attention_mask=_a , past_key_values=_a )[
'''last_hidden_state'''
]
# select random slice
_a : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a : str = output_from_no_past[:, -3:, random_slice_idx].detach()
_a : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) )
def __lowercase ( self , _a , _a , _a , _a , _a , *_a , _a=False ) -> Any:
_a : List[Any] = BioGptForCausalLM(_a )
model.to(_a )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
_a : Dict = model(_a , labels=_a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __lowercase ( self , _a , *_a ) -> str:
_a : Union[str, Any] = BioGptModel(_a )
_a : Union[str, Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def __lowercase ( self , _a , _a , _a , _a , _a , *_a ) -> Union[str, Any]:
_a : Optional[Any] = self.num_labels
_a : str = BioGptForTokenClassification(_a )
model.to(_a )
model.eval()
_a : int = model(_a , attention_mask=_a , token_type_ids=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self ) -> List[str]:
_a : List[Any] = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) : Union[str, Any] = config_and_inputs
_a : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Optional[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
UpperCAmelCase__ : int = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = False
def __lowercase ( self ) -> str:
_a : str = BioGptModelTester(self )
_a : Any = ConfigTester(self , config_class=_a , hidden_size=3_7 )
def __lowercase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def __lowercase ( self ) -> Any:
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Any:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_a : Any = type
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> List[Any]:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_a )
def __lowercase ( self ) -> Optional[Any]:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_a , gradient_checkpointing=_a )
def __lowercase ( self ) -> Optional[Any]:
_a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_a )
def __lowercase ( self ) -> List[str]:
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_a )
def __lowercase ( self ) -> Optional[int]:
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_a )
@slow
def __lowercase ( self ) -> List[Any]:
_a : List[str] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_a )
_a : Tuple = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
_a : Optional[Any] = '''left'''
        # Define PAD Token = EOS Token
_a : Dict = tokenizer.eos_token
_a : Tuple = model.config.eos_token_id
# use different length sentences to test batching
_a : str = [
'''Hello, my dog is a little''',
'''Today, I''',
]
_a : Tuple = tokenizer(_a , return_tensors='''pt''' , padding=_a )
_a : Optional[int] = inputs['''input_ids'''].to(_a )
_a : int = model.generate(
input_ids=_a , attention_mask=inputs['''attention_mask'''].to(_a ) , )
_a : str = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(_a )
_a : Optional[int] = model.generate(input_ids=_a )
_a : Any = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
_a : Any = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(_a )
_a : List[str] = model.generate(input_ids=_a , max_length=model.config.max_length - num_paddings )
_a : int = tokenizer.batch_decode(_a , skip_special_tokens=_a )
_a : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_a )
_a : Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=_a )
_a : Optional[int] = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , [non_padded_sentence, padded_sentence] )
@slow
def __lowercase ( self ) -> Any:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : int = BioGptModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __lowercase ( self ) -> Union[str, Any]:
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : List[Any] = 3
_a : str = input_dict['''input_ids''']
_a : Any = input_ids.ne(1 ).to(_a )
_a : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_a : Any = BioGptForSequenceClassification(_a )
model.to(_a )
model.eval()
_a : Tuple = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowercase ( self ) -> Any:
_a , _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_a : Tuple = 3
_a : Union[str, Any] = '''multi_label_classification'''
_a : Optional[int] = input_dict['''input_ids''']
_a : Optional[int] = input_ids.ne(1 ).to(_a )
_a : Union[str, Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_a : str = BioGptForSequenceClassification(_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
_a : List[str] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
_a : List[str] = model(_a )[0]
_a : List[Any] = 4_2_3_8_4
_a : Tuple = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _a )
_a : Dict = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
@slow
def __lowercase ( self ) -> Dict:
_a : Optional[int] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
_a : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_a )
torch.manual_seed(0 )
_a : Tuple = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(_a )
_a : Optional[int] = model.generate(
**_a , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=_a , )
_a : Optional[int] = tokenizer.decode(output_ids[0] , skip_special_tokens=_a )
_a : Optional[int] = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(_a , _a )
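# Self-contained sketch of why the batched-generation test above sets the
# tokenizer's padding side to "left": a decoder-only model continues from the
# final position, so padding must precede short prompts rather than follow
# them. pad_id and the toy sequences are illustrative.
def left_pad(sequences, pad_id):
    width = max(len(s) for s in sequences)
    return [[pad_id] * (width - len(s)) + s for s in sequences]
assert left_pad([[5, 6, 7], [8]], pad_id=0) == [[5, 6, 7], [0, 0, 8]]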
| 15 |
import argparse
import os
import re
import packaging.version
a__ = '''examples/'''
a__ = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
a__ = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
a__ = '''README.md'''
def __UpperCAmelCase ( __a : List[str] ,__a : int ,__a : Optional[Any] ) -> int:
"""simple docstring"""
with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Tuple = f.read()
_a , _a : str = REPLACE_PATTERNS[pattern]
_a : List[str] = replace.replace('''VERSION''' ,__a )
_a : List[Any] = re_pattern.sub(__a ,__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.write(__a )
def __UpperCAmelCase ( __a : Any ) -> List[Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(__a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__a ,__a ) ,__a ,pattern='''examples''' )
def __UpperCAmelCase ( __a : List[Any] ,__a : List[str]=False ) -> int:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__a ,__a ,__a )
if not patch:
update_version_in_examples(__a )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
_a : Optional[Any] = '''🤗 Transformers currently provides the following architectures'''
_a : str = '''1. Want to contribute a new model?'''
with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Optional[int] = f.readlines()
# Find the start of the list.
_a : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_a : List[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
_a : Tuple = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' ,'''https://huggingface.co/docs/transformers/model_doc''' ,)
index += 1
with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.writelines(__a )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
with open(REPLACE_FILES['''init'''] ,'''r''' ) as f:
_a : Optional[Any] = f.read()
_a : Optional[Any] = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0]
return packaging.version.parse(__a )
def __UpperCAmelCase ( __a : Dict=False ) -> str:
"""simple docstring"""
_a : Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
_a : List[Any] = default_version.base_version
elif patch:
_a : str = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_a : List[str] = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_a : Dict = input(F"""Which version are you releasing? [{default_version}]""" )
if len(__a ) == 0:
_a : int = default_version
print(F"""Updating version to {version}.""" )
global_version_update(__a ,patch=__a )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
_a : str = get_version()
_a : int = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_a : List[Any] = current_version.base_version
# Check with the user we got that right.
_a : Union[str, Any] = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(__a ) == 0:
_a : List[str] = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(__a )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
a__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
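# Minimal sketch of the version arithmetic implemented above: X.Y.Z.dev0 is
# released as X.Y.Z, and development then moves on to X.(Y+1).0.dev0. It uses
# packaging.version just as the script does; the version string is made up.
v = packaging.version.parse("4.30.0.dev0")
release = v.base_version if v.is_devrelease else f"{v.major}.{v.minor + 1}.0"
next_dev = f"{v.major}.{v.minor + 1}.0.dev0"
assert (release, next_dev) == ("4.30.0", "4.31.0.dev0")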
| 15 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
a__ = False
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self , _a=3_2 ) -> str:
set_seed(0 )
_a : str = UNetaDModel(sample_size=_a , in_channels=3 , out_channels=3 )
_a : Tuple = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def __lowercase ( self ) -> Optional[int]:
_a : List[Any] = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_a : Any = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=_a , )
_a : Tuple = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=_a , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
_a : Dict = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(_a ) for _ in range(4 )]
_a : List[Any] = [torch.randn((4, 3, 3_2, 3_2) ).to(_a ) for _ in range(4 )]
_a : Tuple = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(_a ) for _ in range(4 )]
# train with a DDPM scheduler
_a , _a : List[str] = self.get_model_optimizer(resolution=3_2 )
model.train().to(_a )
for i in range(4 ):
optimizer.zero_grad()
_a : Optional[int] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_a : Dict = model(_a , timesteps[i] ).sample
_a : Tuple = torch.nn.functional.mse_loss(_a , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_a , _a : List[Any] = self.get_model_optimizer(resolution=3_2 )
model.train().to(_a )
for i in range(4 ):
optimizer.zero_grad()
_a : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_a : Tuple = model(_a , timesteps[i] ).sample
_a : List[str] = torch.nn.functional.mse_loss(_a , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(_a , _a , atol=1e-5 ) )
self.assertTrue(torch.allclose(_a , _a , atol=1e-5 ) )
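# Hedged sketch of why the two runs above can match exactly: with identical
# betas, both schedulers form the noisy sample as
# sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * eps, so the same seeds and
# batches give the same training losses. Shapes and t here are illustrative.
_betas = torch.linspace(0.0001, 0.02, 1000)
_alphas_bar = torch.cumprod(1.0 - _betas, dim=0)
_x0, _eps, _t = torch.randn(4), torch.randn(4), 10
_noisy = _alphas_bar[_t].sqrt() * _x0 + (1.0 - _alphas_bar[_t]).sqrt() * _eps
assert _noisy.shape == _x0.shape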
| 15 |
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
    if n == 1 or not isinstance(__a ,int ):
return 0
elif n == 2:
return 1
else:
_a : Any = [0, 1]
for i in range(2 ,n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
_a : Any = 0
_a : Dict = 2
while digits < n:
index += 1
_a : Dict = len(str(fibonacci(__a ) ) )
return index
def __UpperCAmelCase ( __a : int = 1_000 ) -> int:
"""simple docstring"""
return fibonacci_digits_index(__a )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
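# Self-contained sanity sketch of fibonacci_digits_index under the indexing
# used above (sequence[1] = 1, sequence[12] = 144): the first three-digit term
# is the 12th, the worked example from Project Euler problem 25.
def _fib_sketch(n):
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return b
sketch_index = 2
while len(str(_fib_sketch(sketch_index))) < 3:
    sketch_index += 1
assert (sketch_index, _fib_sketch(sketch_index)) == (12, 144)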
| 15 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
a__ = '''Create a default config file for Accelerate with only a few flags set.'''
def __UpperCAmelCase ( __a : Optional[int]="no" ,__a : str = default_json_config_file ,__a : bool = False ) -> Any:
"""simple docstring"""
_a : List[Any] = Path(__a )
path.parent.mkdir(parents=__a ,exist_ok=__a )
if path.exists():
print(
F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
return False
_a : List[Any] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
_a : Optional[int] = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
_a : Any = torch.cuda.device_count()
_a : List[Any] = num_gpus
_a : Dict = False
if num_gpus > 1:
_a : str = '''MULTI_GPU'''
else:
_a : Union[str, Any] = '''NO'''
elif is_xpu_available() and use_xpu:
_a : str = torch.xpu.device_count()
_a : List[str] = num_xpus
_a : Union[str, Any] = False
if num_xpus > 1:
_a : Tuple = '''MULTI_XPU'''
else:
_a : Tuple = '''NO'''
elif is_npu_available():
_a : Dict = torch.npu.device_count()
_a : Dict = num_npus
_a : Optional[int] = False
if num_npus > 1:
_a : List[str] = '''MULTI_NPU'''
else:
_a : int = '''NO'''
else:
_a : str = 0
_a : Optional[Any] = True
_a : int = 1
_a : int = '''NO'''
_a : Tuple = ClusterConfig(**__a )
config.to_json_file(__a )
return path
def __UpperCAmelCase ( __a : Tuple ,__a : Any ) -> List[Any]:
"""simple docstring"""
_a : List[str] = parser.add_parser('''default''' ,parents=__a ,help=__a ,formatter_class=__a )
parser.add_argument(
'''--config_file''' ,default=__a ,help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) ,dest='''save_location''' ,)
parser.add_argument(
'''--mixed_precision''' ,choices=['''no''', '''fp16''', '''bf16'''] ,type=__a ,help='''Whether or not to use mixed precision training. '''
'''Choose between FP16 and BF16 (bfloat16) training. '''
'''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' ,default='''no''' ,)
parser.set_defaults(func=__a )
return parser
def __UpperCAmelCase ( __a : Dict ) -> List[Any]:
"""simple docstring"""
_a : List[Any] = write_basic_config(args.mixed_precision ,args.save_location )
if config_file:
print(F"""accelerate configuration saved at {config_file}""" )
| 15 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
a__ = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
a__ = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
a__ = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
        - \'matthews_correlation\': Matthews correlation coefficient
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __UpperCAmelCase ( __a : int ,__a : List[str] ) -> Optional[Any]:
"""simple docstring"""
return float((preds == labels).mean() )
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ,__a : List[str]="binary" ) -> Optional[int]:
"""simple docstring"""
_a : List[str] = simple_accuracy(__a ,__a )
_a : Any = float(fa_score(y_true=__a ,y_pred=__a ,average=__a ) )
return {
"accuracy": acc,
"f1": fa,
}
def __UpperCAmelCase ( __a : Optional[Any] ,__a : str ) -> List[Any]:
"""simple docstring"""
_a : Union[str, Any] = {}
for id_pred, label in zip(__a ,__a ):
_a : Optional[int] = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
_a : Optional[Any] = id_pred['''prediction''']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
_a : str = [(pred, label)]
_a , _a : Any = [], []
for question, preds_labels in question_map.items():
_a , _a : Any = zip(*__a )
_a : List[Any] = fa_score(y_true=__a ,y_pred=__a ,average='''macro''' )
fas.append(__a )
_a : List[str] = int(sum(pred == label for pred, label in preds_labels ) == len(__a ) )
ems.append(__a )
_a : List[str] = float(sum(__a ) / len(__a ) )
_a : str = sum(__a ) / len(__a )
_a : Optional[int] = float(fa_score(y_true=__a ,y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
def __lowercase ( self ) -> Any:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
def __lowercase ( self , _a , _a ) -> Optional[Any]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_a , _a )}
elif self.config_name == "cb":
return acc_and_fa(_a , _a , fa_avg='''macro''' )
elif self.config_name == "record":
_a : Any = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
_a : Any = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
return evaluate_record(_a , _a )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_a , _a )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_a , _a )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 15 | 1 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Optional[int]:
_a : str = logging.get_logger()
# the current default level is logging.WARNING
_a : int = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(_a )
def __lowercase ( self ) -> Any:
_a : int = logging.get_verbosity()
_a : List[str] = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
_a : List[Any] = '''Testing 1, 2, 3'''
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(_a ) as cl:
logger.warning(_a )
self.assertEqual(cl.out , msg + '''\n''' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(_a ) as cl:
logger.warning(_a )
self.assertEqual(cl.out , '''''' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(_a ) as cl:
logger.warning(_a )
self.assertEqual(cl.out , msg + '''\n''' )
# restore to the original level
logging.set_verbosity(_a )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
def __lowercase ( self ) -> Dict:
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
_a : str = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
_a : Any = os.getenv('''TRANSFORMERS_VERBOSITY''' , _a )
_a : int = logging.log_levels[env_level_str]
_a : Dict = logging.get_verbosity()
self.assertEqual(
_a , _a , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
_a : str = ''''''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
def __lowercase ( self ) -> Any:
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
_a : Union[str, Any] = logging.logging.getLogger()
with CaptureLogger(_a ) as cl:
# this action activates the env var
logging.get_logger('''transformers.models.bart.tokenization_bart''' )
self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
# no need to restore as nothing was changed
def __lowercase ( self ) -> int:
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
_a : Any = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
_a : List[Any] = '''Testing 1, 2, 3'''
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
# nothing should be logged as env var disables this method
with CaptureLogger(_a ) as cl:
logger.warning_advice(_a )
self.assertEqual(cl.out , '''''' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(_a ) as cl:
logger.warning_advice(_a )
self.assertEqual(cl.out , msg + '''\n''' )
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
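# Stdlib illustration of the mechanism the tests above exercise: setting a
# level on a library's root logger changes the effective level of every child
# logger beneath it, which is how set_verbosity_* silences whole subtrees.
# The logger names here are made up for the sketch.
import logging as py_logging
_sketch_root = py_logging.getLogger("transformers_sketch")
_sketch_child = py_logging.getLogger("transformers_sketch.models.bart")
_sketch_root.setLevel(py_logging.ERROR)
assert _sketch_child.getEffectiveLevel() == py_logging.ERROR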
| 15 |
import numpy as np
def __UpperCAmelCase ( __a : np.ndarray ,__a : np.ndarray ,__a : float = 1E-12 ,__a : int = 100 ,) -> tuple[float, np.ndarray]:
"""simple docstring"""
assert np.shape(__a )[0] == np.shape(__a )[1]
# Ensure proper dimensionality.
assert np.shape(__a )[0] == np.shape(__a )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(__a ) == np.iscomplexobj(__a )
_a : List[str] = np.iscomplexobj(__a )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(__a ,input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to the next.
_a : List[str] = False
_a : List[str] = 0
_a : Tuple = 0
_a : str = 1E12
while not convergence:
# Multiple matrix by the vector.
_a : str = np.dot(__a ,__a )
# Normalize the resulting output vector.
_a : List[Any] = w / np.linalg.norm(__a )
        # Find the Rayleigh quotient
        # (faster than usual because we know the vector is already normalized)
_a : Dict = vector.conj().T if is_complex else vector.T
_a : Tuple = np.dot(__a ,np.dot(__a ,__a ) )
# Check convergence.
_a : List[str] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
_a : Dict = True
_a : str = lambda_
if is_complex:
_a : Tuple = np.real(lambda_ )
return lambda_, vector
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
_a : List[str] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
_a : int = np.array([41, 4, 20] )
_a : Optional[Any] = real_input_matrix.astype(np.complexaaa )
_a : int = np.triu(1j * complex_input_matrix ,1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
_a : Union[str, Any] = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
_a : Optional[int] = real_input_matrix
_a : Union[str, Any] = real_vector
elif problem_type == "complex":
_a : str = complex_input_matrix
_a : str = complex_vector
# Our implementation.
_a , _a : Optional[Any] = power_iteration(__a ,__a )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
_a , _a : List[str] = np.linalg.eigh(__a )
# Last eigenvalue is the maximum one.
_a : Tuple = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
_a : List[Any] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
        # Take element-wise absolute values of each eigenvector,
        # as they are only unique up to a sign flip.
assert np.linalg.norm(np.abs(__a ) - np.abs(__a ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
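# Compact numeric sketch of the method above: repeatedly multiplying by a
# diagonal matrix and renormalizing converges to its dominant eigenpair; here
# the largest eigenvalue is 3 by construction.
_m = np.diag([3.0, 1.0])
_v = np.array([1.0, 1.0])
for _ in range(50):
    _w = _m @ _v
    _v = _w / np.linalg.norm(_w)
_lam = _v.T @ (_m @ _v)  # Rayleigh quotient of the converged vector
assert abs(_lam - 3.0) <= 1e-6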
| 15 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a__ = logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PIL.Image.BICUBIC , _a = True , _a = None , _a = 1 / 2_5_5 , _a = True , _a = True , _a = None , _a = None , **_a , ) -> None:
super().__init__(**_a )
_a : Dict = size if size is not None else {'''height''': 2_5_6, '''width''': 2_5_6}
_a : int = get_size_dict(_a )
_a : Tuple = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_a : Dict = get_size_dict(_a , param_name='''crop_size''' )
_a : Union[str, Any] = do_resize
_a : Any = size
_a : List[Any] = resample
_a : List[Any] = do_center_crop
_a : Union[str, Any] = crop_size
_a : str = do_rescale
_a : Union[str, Any] = rescale_factor
_a : int = do_normalize
_a : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowercase ( self , _a , _a , _a = PIL.Image.BICUBIC , _a = None , **_a , ) -> np.ndarray:
_a : List[Any] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
_a , size=(size['''height'''], size['''width''']) , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
_a : Union[str, Any] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_a , size=(size['''height'''], size['''width''']) , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> List[Any]:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_a : int = do_resize if do_resize is not None else self.do_resize
_a : int = resample if resample is not None else self.resample
_a : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
_a : Any = do_rescale if do_rescale is not None else self.do_rescale
_a : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
_a : Optional[int] = image_mean if image_mean is not None else self.image_mean
_a : Tuple = image_std if image_std is not None else self.image_std
_a : Any = size if size is not None else self.size
_a : str = get_size_dict(_a )
_a : List[Any] = crop_size if crop_size is not None else self.crop_size
_a : int = get_size_dict(_a , param_name='''crop_size''' )
_a : Union[str, Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_a : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
_a : Optional[int] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_center_crop:
_a : Optional[Any] = [self.center_crop(image=_a , size=_a ) for image in images]
if do_rescale:
_a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_a : Dict = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_a : Tuple = [to_channel_dimension_format(_a , _a ) for image in images]
_a : Dict = {'''pixel_values''': images}
return BatchFeature(data=_a , tensor_type=_a )
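# Hedged numeric sketch of the preprocessing order implemented above
# (resize -> center_crop -> rescale -> normalize). Resize is skipped by
# starting from a 256x256 array; mean/std 0.5 match the defaults used above.
_img = np.random.randint(0, 256, (256, 256, 3)).astype(np.float32)
_img = _img[16:240, 16:240]    # center crop 256 -> 224
_img = _img * (1 / 255)        # rescale into [0, 1]
_img = (_img - 0.5) / 0.5      # normalize with IMAGENET_STANDARD_MEAN/STD
assert _img.shape == (224, 224, 3)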
| 15 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCAmelCase__ : Optional[datasets.Features] = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCAmelCase__ : Any = PandasConfig
def __lowercase ( self ) -> Any:
return datasets.DatasetInfo(features=self.config.features )
def __lowercase ( self , _a ) -> List[Any]:
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_a : str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_a , (str, list, tuple) ):
_a : Dict = data_files
if isinstance(_a , _a ):
_a : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : int = [dl_manager.iter_files(_a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
_a : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(_a , _a ):
_a : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : Any = [dl_manager.iter_files(_a ) for file in files]
splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={'''files''': files} ) )
return splits
def __lowercase ( self , _a ) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_a : Optional[Any] = table_cast(_a , self.config.features.arrow_schema )
return pa_table
def __lowercase ( self , _a ) -> List[str]:
for i, file in enumerate(itertools.chain.from_iterable(_a ) ):
with open(_a , '''rb''' ) as f:
_a : str = pa.Table.from_pandas(pd.read_pickle(_a ) )
yield i, self._cast_table(_a )
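# Minimal sketch of the per-file conversion in the generator above: a pickled
# pandas DataFrame is read back and turned into a pyarrow Table. The file name
# and contents are illustrative.
import os
import tempfile
with tempfile.TemporaryDirectory() as _sketch_dir:
    _path = os.path.join(_sketch_dir, "data.pkl")
    pd.DataFrame({"a": [1, 2]}).to_pickle(_path)
    assert pa.Table.from_pandas(pd.read_pickle(_path)).num_rows == 2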
| 15 | 1 |
import inspect
import unittest
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Optional[int]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def __lowercase ( self ) -> Any:
import diffusers
from diffusers.dependency_versions_table import deps
_a : Any = inspect.getmembers(_a , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
_a : List[str] = '''k-diffusion'''
elif backend == "invisible_watermark":
_a : Tuple = '''invisible-watermark'''
assert backend in deps, F"""{backend} is not in the deps table!"""
| 15 |
def __UpperCAmelCase ( __a : int ,__a : int ,__a : int ) -> int:
"""simple docstring"""
if exponent == 1:
return base
if exponent % 2 == 0:
_a : List[Any] = _modexpt(__a ,exponent // 2 ,__a ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(__a ,exponent - 1 ,__a )) % modulo_value
def __UpperCAmelCase ( __a : int = 1_777 ,__a : int = 1_855 ,__a : int = 8 ) -> int:
"""simple docstring"""
_a : List[Any] = base
for _ in range(1 ,__a ):
_a : Any = _modexpt(__a ,__a ,10**digits )
return result
if __name__ == "__main__":
print(f'''{solution() = }''')
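# Quick check of the tower evaluation above: with height 3 the loop computes
# base ** (base ** base) mod 10**digits, matching Python's built-in three-
# argument pow. The small base/digits values are illustrative.
_base, _digits, _height = 3, 4, 3
_result = _base
for _ in range(1, _height):
    _result = pow(_base, _result, 10**_digits)
assert _result == 3 ** (3 ** 3) % 10**4 == 4987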
| 15 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a__ = 16
a__ = 32
def __UpperCAmelCase ( __a : Accelerator ,__a : int = 16 ) -> Union[str, Any]:
"""simple docstring"""
_a : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
_a : Tuple = load_dataset('''glue''' ,'''mrpc''' )
def tokenize_function(__a : Tuple ):
# max_length=None => use the model max length (it's actually the default)
_a : int = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=__a ,max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_a : List[str] = datasets.map(
__a ,batched=__a ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_a : Dict = tokenized_datasets.rename_column('''label''' ,'''labels''' )
def collate_fn(__a : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_a : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_a : List[Any] = 16
elif accelerator.mixed_precision != "no":
_a : List[str] = 8
else:
_a : Union[str, Any] = None
return tokenizer.pad(
__a ,padding='''longest''' ,max_length=__a ,pad_to_multiple_of=__a ,return_tensors='''pt''' ,)
# Instantiate dataloaders.
_a : Any = DataLoader(
tokenized_datasets['''train'''] ,shuffle=__a ,collate_fn=__a ,batch_size=__a )
_a : Any = DataLoader(
tokenized_datasets['''validation'''] ,shuffle=__a ,collate_fn=__a ,batch_size=__a )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a__ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[Any] ) -> Any:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' ,__a ) == "1":
_a : Optional[Any] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
_a : Tuple = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,log_with='''all''' ,project_dir=args.project_dir )
else:
_a : Optional[Any] = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a : int = config['''lr''']
_a : Optional[int] = int(config['''num_epochs'''] )
_a : Optional[Any] = int(config['''seed'''] )
_a : Dict = int(config['''batch_size'''] )
set_seed(__a )
_a , _a : Optional[int] = get_dataloaders(__a ,__a )
_a : List[str] = evaluate.load('''glue''' ,'''mrpc''' )
# If the batch size is too big we use gradient accumulation
_a : Any = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_a : Tuple = batch_size // MAX_GPU_BATCH_SIZE
_a : Dict = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a : Any = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' ,return_dict=__a )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_a : Dict = model.to(accelerator.device )
# Instantiate optimizer
_a : Tuple = AdamW(params=model.parameters() ,lr=__a )
# Instantiate scheduler
_a : Dict = get_linear_schedule_with_warmup(
optimizer=__a ,num_warmup_steps=100 ,num_training_steps=(len(__a ) * num_epochs) // gradient_accumulation_steps ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a , _a , _a , _a , _a : Tuple = accelerator.prepare(
__a ,__a ,__a ,__a ,__a )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
_a : Optional[int] = os.path.split(__a )[-1].split('''.''' )[0]
accelerator.init_trackers(__a ,__a )
# Now we train the model
for epoch in range(__a ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
_a : Tuple = 0
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_a : int = model(**__a )
_a : Optional[int] = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
_a : Optional[Any] = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
_a : Optional[int] = model(**__a )
_a : Union[str, Any] = outputs.logits.argmax(dim=-1 )
_a , _a : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__a ,references=__a ,)
_a : Optional[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" ,__a )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(__a ),
'''epoch''': epoch,
} ,step=__a ,)
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_a : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' ,type=__a ,default=__a ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' ,)
parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' ,action='''store_true''' ,help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' ,)
parser.add_argument(
'''--project_dir''' ,type=__a ,default='''logs''' ,help='''Location on where to store experiment tracking logs and relevant project information''' ,)
_a : Optional[int] = parser.parse_args()
_a : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__a ,__a )
if __name__ == "__main__":
main()
| 15 |
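A small sketch of the gradient-accumulation arithmetic used in the training script above: a requested batch size larger than MAX_GPU_BATCH_SIZE is split into micro-batches, the optimizer steps once per accumulation window, and the effective batch size (micro-batch * steps) is preserved. The helper name is illustrative:

MAX_GPU_BATCH_SIZE = 16

def accumulation_plan(batch_size: int) -> tuple[int, int]:
    # Returns (per-step batch size, gradient accumulation steps).
    steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE:
        steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    return batch_size, steps

assert accumulation_plan(64) == (16, 4)  # effective batch size stays 16 * 4 = 64
assert accumulation_plan(8) == (8, 1)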
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
a__ = '''\
'''
a__ = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
a__ = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def __lowercase ( self , _a , _a , _a = 1_6 , _a = True , _a=None ) -> List[Any]:
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
_a : List[str] = '''cuda'''
else:
_a : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
_a : Dict = AutoModelForCausalLM.from_pretrained(_a )
_a : List[Any] = model.to(_a )
_a : List[str] = AutoTokenizer.from_pretrained(_a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_a : str = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_a : List[Any] = model.config.max_length - 1
else:
_a : List[str] = model.config.max_length
_a : Union[str, Any] = tokenizer(
_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a )
_a : List[Any] = encodings['''input_ids''']
_a : int = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_a : Optional[int] = []
_a : Dict = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ):
_a : Dict = min(start_index + batch_size , len(_a ) )
_a : Union[str, Any] = encoded_texts[start_index:end_index]
_a : int = attn_masks[start_index:end_index]
if add_start_token:
_a : Dict = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a )
_a : List[str] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
_a : Dict = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(_a ), attn_mask] , dim=1 )
_a : Dict = encoded_batch
with torch.no_grad():
_a : Any = model(_a , attention_mask=_a ).logits
_a : List[str] = out_logits[..., :-1, :].contiguous()
_a : Union[str, Any] = labels[..., 1:].contiguous()
_a : Optional[int] = attn_mask[..., 1:].contiguous()
_a : Union[str, Any] = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
| 15 | 1 |
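The metric's description defines perplexity as the exponentiated average negative log-likelihood, which is exactly what the batched loop above computes. A hand-rolled check of that formula on toy tensors (values are random, purely illustrative):

import torch
from torch.nn import CrossEntropyLoss

# Perplexity = exp of the mean per-token negative log-likelihood over the
# (shifted) sequence; CrossEntropyLoss already returns natural-log NLL.
logits = torch.randn(1, 5, 100)             # (batch, seq_len, vocab)
labels = torch.randint(0, 100, (1, 5))
shift_logits, shift_labels = logits[:, :-1, :], labels[:, 1:]

loss_fct = CrossEntropyLoss(reduction="none")
nll = loss_fct(shift_logits.transpose(1, 2), shift_labels)  # per-token NLL
ppl = torch.exp(nll.mean(dim=1))
print(float(ppl))  # higher = the model is more "surprised" by the sequence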
from __future__ import annotations
from math import pi
def __UpperCAmelCase ( __a : float ,__a : float ,__a : float ) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 |
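A worked example of the solve-for-the-zero-argument logic above, with a plainly named equivalent (the name `ind_reactance` is illustrative): a 10 mH inductor at 5 kHz has X_L = 2*pi*f*L ≈ 314.16 ohms.

from math import pi

def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    # Whichever argument is 0 is the one being solved for.
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    if frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    return {"reactance": 2 * pi * frequency * inductance}

print(ind_reactance(10e-3, 5e3, 0))  # {'reactance': 314.1592653589793}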
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 15 | 1 |
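The _LazyModule indirection above defers importing the heavy torch-backed submodules until a symbol is actually accessed. A minimal sketch of the same idea with a module-level __getattr__ (PEP 562); the module and symbol names here are placeholders, not the real transformers internals:

import importlib

_import_structure = {"configuration_xmod": ["XmodConfig"]}

def __getattr__(name):
    # Import the owning submodule only on first attribute access.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")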
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = LDMTextToImagePipeline
UpperCAmelCase__ : List[Any] = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
UpperCAmelCase__ : int = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
UpperCAmelCase__ : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : str = False
def __lowercase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_a : List[str] = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
_a : Tuple = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_a , set_alpha_to_one=_a , )
torch.manual_seed(0 )
_a : Tuple = AutoencoderKL(
block_out_channels=(3_2, 6_4) , in_channels=3 , out_channels=3 , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , latent_channels=4 , )
torch.manual_seed(0 )
_a : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_a : int = CLIPTextModel(_a )
_a : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_a : List[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vqvae''': vae,
'''bert''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def __lowercase ( self , _a , _a=0 ) -> List[Any]:
if str(_a ).startswith('''mps''' ):
_a : Any = torch.manual_seed(_a )
else:
_a : Tuple = torch.Generator(device=_a ).manual_seed(_a )
_a : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowercase ( self ) -> int:
_a : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a : Tuple = self.get_dummy_components()
_a : str = LDMTextToImagePipeline(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Optional[Any] = self.get_dummy_inputs(_a )
_a : Optional[Any] = pipe(**_a ).images
_a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_6, 1_6, 3)
_a : List[Any] = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self , _a , _a=torch.float32 , _a=0 ) -> List[Any]:
_a : List[str] = torch.manual_seed(_a )
_a : Dict = np.random.RandomState(_a ).standard_normal((1, 4, 3_2, 3_2) )
_a : List[Any] = torch.from_numpy(_a ).to(device=_a , dtype=_a )
_a : Optional[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowercase ( self ) -> Optional[Any]:
_a : Any = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : List[str] = self.get_inputs(_a )
_a : Optional[Any] = pipe(**_a ).images
_a : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 2_5_6, 2_5_6, 3)
_a : Union[str, Any] = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] )
_a : int = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self , _a , _a=torch.float32 , _a=0 ) -> Dict:
_a : Optional[int] = torch.manual_seed(_a )
_a : List[str] = np.random.RandomState(_a ).standard_normal((1, 4, 3_2, 3_2) )
_a : List[Any] = torch.from_numpy(_a ).to(device=_a , dtype=_a )
_a : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 5_0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowercase ( self ) -> List[str]:
_a : str = LDMTextToImagePipeline.from_pretrained('''CompVis/ldm-text2im-large-256''' ).to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : int = self.get_inputs(_a )
_a : Tuple = pipe(**_a ).images[0]
_a : List[str] = load_numpy(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy''' )
_a : Optional[int] = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 15 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
a__ = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
a__ = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
a__ = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
a__ = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
a__ = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
a__ = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
a__ = ''''''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : List[str] ) -> Optional[int]:
"""simple docstring"""
assert ReadMe.from_string(__a ,__a ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def __UpperCAmelCase ( __a : List[str] ,__a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ):
_a : List[Any] = ReadMe.from_string(__a ,__a )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Dict ,__a : Dict ) -> Tuple:
"""simple docstring"""
with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ):
ReadMe.from_string(__a ,__a )
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
ReadMe.from_string(__a ,__a ,suppress_parsing_errors=__a )
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Any ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : Tuple = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : Optional[Any] = ReadMe.from_readme(__a ,__a ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def __UpperCAmelCase ( __a : List[Any] ,__a : List[Any] ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : int = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : Optional[int] = expected_error.format(path=__a )
with pytest.raises(__a ,match=re.escape(__a ) ):
_a : Any = ReadMe.from_readme(__a ,__a )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : str ,__a : Union[str, Any] ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : Optional[Any] = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : str = expected_error.format(path=__a )
with pytest.raises(__a ,match=re.escape(__a ) ):
ReadMe.from_readme(__a ,__a )
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Optional[Any] ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : int = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
ReadMe.from_readme(__a ,__a ,suppress_parsing_errors=__a )
| 15 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 15 |
from __future__ import annotations
def __UpperCAmelCase ( __a : list ) -> float:
"""simple docstring"""
if not nums:
raise ValueError('''List is empty''' )
return sum(__a ) / len(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 1 |
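The function above implements the arithmetic mean; its docstring carries no doctests, so a quick check of the formula by hand:

values = [2, 4, 6, 8]
assert sum(values) / len(values) == 5.0  # arithmetic mean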
def __UpperCAmelCase ( __a : int ) -> bool:
"""simple docstring"""
if not isinstance(__a ,__a ):
_a : Any = F"""Input value of [number={number}] must be an integer"""
raise TypeError(__a )
if number < 0:
return False
_a : Tuple = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 |
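The digit-peeling loop above tests whether n squared ends in the digits of n, i.e. whether n is an automorphic number (5 -> 25, 6 -> 36, 76 -> 5776). A readable equivalent of the same check via a single modulo (the name `is_automorphic` is illustrative):

def is_automorphic(n: int) -> bool:
    # n is automorphic when n*n ends with the decimal digits of n.
    return n >= 0 and (n * n) % 10 ** len(str(n)) == n

assert [is_automorphic(k) for k in (5, 6, 7, 76)] == [True, True, False, True]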
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
a__ = ['''small''', '''medium''', '''large''']
a__ = '''lm_head.decoder.weight'''
a__ = '''lm_head.weight'''
def __UpperCAmelCase ( __a : str ,__a : str ) -> List[str]:
"""simple docstring"""
_a : Any = torch.load(__a )
_a : List[str] = d.pop(__a )
os.makedirs(__a ,exist_ok=__a )
torch.save(__a ,os.path.join(__a ,__a ) )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
a__ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
a__ = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
a__ = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 15 | 1 |
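The conversion above is essentially a single key rename in the checkpoint's state dict: DialoGPT stores the LM head under `lm_head.decoder.weight`, while transformers expects `lm_head.weight`. A sketch of that rename on a dummy state dict:

import torch

state_dict = {"lm_head.decoder.weight": torch.zeros(2, 2), "other": torch.ones(1)}
# Pop the old key and reinsert its tensor under the expected name.
state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
assert set(state_dict) == {"lm_head.weight", "other"}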
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
a__ = logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , *_a , **_a ) -> None:
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 15 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase_ ( enum.Enum ):
"""simple docstring"""
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : Optional[Any] = 2
@add_end_docstrings(__lowercase )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self , *_a , **_a ) -> List[str]:
super().__init__(*_a , **_a )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_a : Dict = None
if self.model.config.prefix is not None:
_a : List[Any] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_a : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_a , _a , _a : str = self._sanitize_parameters(prefix=_a , **self._forward_params )
_a : Optional[Any] = {**self._preprocess_params, **preprocess_params}
_a : List[Any] = {**self._forward_params, **forward_params}
def __lowercase ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> Optional[int]:
_a : List[Any] = {}
if prefix is not None:
_a : Optional[Any] = prefix
if prefix:
_a : Dict = self.tokenizer(
_a , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Tuple = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
_a : Dict = handle_long_generation
preprocess_params.update(_a )
_a : Tuple = generate_kwargs
_a : Any = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
_a : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
_a : Any = ReturnType.TENSORS
if return_type is not None:
_a : Any = return_type
if clean_up_tokenization_spaces is not None:
_a : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
_a : Tuple = self.tokenizer.encode(_a , add_special_tokens=_a )
if len(_a ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
_a : List[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowercase ( self , *_a , **_a ) -> Union[str, Any]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_a , **_a )
def __call__( self , _a , **_a ) -> List[str]:
return super().__call__(_a , **_a )
def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]:
_a : Optional[int] = self.tokenizer(
prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Union[str, Any] = prompt_text
if handle_long_generation == "hole":
_a : List[str] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_a : int = generate_kwargs['''max_new_tokens''']
else:
_a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_a : List[str] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'''
''' model\'s max length''' )
_a : List[Any] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_a : List[str] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def __lowercase ( self , _a , **_a ) -> Optional[int]:
_a : Any = model_inputs['''input_ids''']
_a : Optional[Any] = model_inputs.get('''attention_mask''' , _a )
# Allow empty prompts
if input_ids.shape[1] == 0:
_a : int = None
_a : int = None
_a : List[str] = 1
else:
_a : List[Any] = input_ids.shape[0]
_a : Union[str, Any] = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_a : int = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
_a : Tuple = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
_a : int = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_a : Dict = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_a : Optional[Any] = self.model.generate(input_ids=_a , attention_mask=_a , **_a )
_a : int = generated_sequence.shape[0]
if self.framework == "pt":
_a : Tuple = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_a : List[Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowercase ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> int:
_a : Tuple = model_outputs['''generated_sequence'''][0]
_a : int = model_outputs['''input_ids''']
_a : Any = model_outputs['''prompt_text''']
_a : Any = generated_sequence.numpy().tolist()
_a : Any = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_a : Optional[int] = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_a : str = self.tokenizer.decode(
_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_a : Union[str, Any] = 0
else:
_a : str = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) )
if return_type == ReturnType.FULL_TEXT:
_a : str = prompt_text + text[prompt_length:]
else:
_a : List[str] = text[prompt_length:]
_a : Union[str, Any] = {'''generated_text''': all_text}
records.append(_a )
return records
| 15 | 1 |
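A sketch of the `handle_long_generation="hole"` arithmetic above: keep only the rightmost prompt tokens so that prompt plus new tokens still fits the model window, raising when even that is impossible. The helper name is illustrative:

def hole_truncate(input_ids: list[int], model_max_length: int, max_new_tokens: int) -> list[int]:
    keep_length = model_max_length - max_new_tokens
    if keep_length <= 0:
        raise ValueError("desired new tokens exceed the model's max length")
    return input_ids[-keep_length:]  # drop tokens from the left

assert hole_truncate(list(range(100)), model_max_length=64, max_new_tokens=16) == list(range(52, 100))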
import d4rl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
a__ = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
a__ = '''hopper-medium-v2'''
a__ = gym.make(env_name)
a__ = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
a__ = env.reset()
a__ = 0
a__ = 0
a__ = 1000
a__ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
a__ = pipeline(obs, planning_horizon=32)
# execute action in environment
a__ , a__ , a__ , a__ = env.step(denorm_actions)
a__ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
f''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
a__ = next_observation
except KeyboardInterrupt:
pass
print(f'''Total reward: {total_reward}''')
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( __a : Dict=None ) -> str:
"""simple docstring"""
if subparsers is not None:
_a : Union[str, Any] = subparsers.add_parser('''test''' )
else:
_a : List[str] = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' ,default=__a ,help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) ,)
if subparsers is not None:
parser.set_defaults(func=__a )
return parser
def __UpperCAmelCase ( __a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Dict = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
_a : List[Any] = script_name
else:
_a : Union[str, Any] = F"""--config_file={args.config_file} {script_name}"""
_a : str = ['''accelerate-launch'''] + test_args.split()
_a : str = execute_subprocess_async(__a ,env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = test_command_parser()
_a : List[Any] = parser.parse_args()
test_command(__a )
if __name__ == "__main__":
main()
| 15 | 1 |
def __UpperCAmelCase ( __a : list[list[int | float]] ) -> int:
"""simple docstring"""
_a : str = len(__a )
_a : Tuple = len(matrix[0] )
_a : Dict = min(__a ,__a )
for row in range(__a ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 ,__a ):
_a : Dict = matrix[col][row] / matrix[row][row]
for i in range(__a ,__a ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
_a : List[str] = True
for i in range(row + 1 ,__a ):
if matrix[i][row] != 0:
_a , _a : Any = matrix[i], matrix[row]
_a : Any = False
break
if reduce:
rank -= 1
for i in range(__a ):
_a : int = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 |
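The elimination above reduces the matrix to row-echelon form and counts the non-zero pivots. A quick cross-check of that rank computation on a rank-deficient matrix, using numpy only for verification (the third row is the sum of the first two, so the rank is 2, not 3):

import numpy as np

matrix = [[1.0, 2.0, 3.0],
          [4.0, 5.0, 6.0],
          [5.0, 7.0, 9.0]]
assert np.linalg.matrix_rank(np.array(matrix)) == 2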
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[Any] = tempfile.mkdtemp()
# fmt: off
_a : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_a : Any = {
'''do_resize''': True,
'''size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_a : str = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __lowercase ( self , **_a ) -> Any:
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , **_a ) -> str:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
_a : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self ) -> str:
_a : List[str] = self.get_tokenizer()
_a : Tuple = self.get_image_processor()
_a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Dict:
_a : List[str] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a : List[Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Any:
_a : Dict = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : int = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[str] = self.prepare_image_inputs()
_a : List[Any] = image_processor(_a , return_tensors='''np''' )
_a : Dict = processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = self.get_tokenizer()
_a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Tuple = '''lower newer'''
_a : int = processor(text=_a )
_a : str = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self ) -> List[Any]:
_a : Any = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : Tuple = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[Any] = '''lower newer'''
_a : Union[str, Any] = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_a ):
processor()
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Any = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : int = processor.batch_decode(_a )
_a : int = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __lowercase ( self ) -> List[Any]:
_a : Tuple = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Optional[int] = '''lower newer'''
_a : Dict = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 15 | 1 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_a : int = argparse.ArgumentParser()
parser.add_argument(
'''-m''' ,'''--pretrained_model_name_or_path''' ,type=__a ,default=__a ,required=__a ,help='''Path to pretrained model or model identifier from huggingface.co/models.''' ,)
parser.add_argument(
'''-c''' ,'''--caption''' ,type=__a ,default='''robotic cat with wings''' ,help='''Text used to generate images.''' ,)
parser.add_argument(
        '''-n''' ,'''--images_num''' ,type=__a ,default=4 ,help='''How many images to generate.''' ,)
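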
parser.add_argument(
'''-s''' ,'''--seed''' ,type=__a ,default=42 ,help='''Seed for random process.''' ,)
parser.add_argument(
'''-ci''' ,'''--cuda_id''' ,type=__a ,default=0 ,help='''cuda_id.''' ,)
_a : Dict = parser.parse_args()
return args
def __UpperCAmelCase ( __a : str ,__a : List[Any] ,__a : List[str] ) -> Optional[int]:
"""simple docstring"""
if not len(__a ) == rows * cols:
        raise ValueError('''The number of images must equal rows * cols.''' )
_a , _a : Tuple = imgs[0].size
_a : Tuple = Image.new('''RGB''' ,size=(cols * w, rows * h) )
_a , _a : str = grid.size
for i, img in enumerate(__a ):
grid.paste(__a ,box=(i % cols * w, i // cols * h) )
return grid
def __UpperCAmelCase ( __a : Optional[Any] ,__a : Optional[int]="robotic cat with wings" ,__a : Any=7.5 ,__a : Any=50 ,__a : int=1 ,__a : List[str]=42 ,) -> Any:
"""simple docstring"""
_a : int = torch.Generator(pipeline.device ).manual_seed(__a )
_a : Optional[int] = pipeline(
__a ,guidance_scale=__a ,num_inference_steps=__a ,generator=__a ,num_images_per_prompt=__a ,).images
_a : List[str] = int(math.sqrt(__a ) )
_a : Any = image_grid(__a ,rows=_rows ,cols=num_images_per_prompt // _rows )
return grid, images
a__ = parse_args()
# Load models and create wrapper for stable diffusion
a__ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
a__ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
a__ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
a__ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
a__ = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
a__ = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
a__ = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
a__ = unet.to(torch.device('''cuda''', args.cuda_id))
a__ = pipeline.to(unet.device)
a__ , a__ = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
a__ = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 15 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
a__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : Optional[int] ,__a : List[str] ,__a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for attribute in key.split('''.''' ):
_a : Optional[Any] = getattr(__a ,__a )
if weight_type is not None:
_a : Dict = getattr(__a ,__a ).shape
else:
_a : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_a : List[Any] = value
elif weight_type == "weight_g":
_a : Any = value
elif weight_type == "weight_v":
_a : Union[str, Any] = value
elif weight_type == "bias":
_a : Optional[int] = value
else:
_a : List[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __UpperCAmelCase ( __a : Any ,__a : Union[str, Any] ,__a : Union[str, Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = []
_a : Union[str, Any] = fairseq_model.state_dict()
_a : Union[str, Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_a : int = False
if "conv_layers" in name:
load_conv_layer(
__a ,__a ,__a ,__a ,hf_model.config.feat_extract_norm == '''group''' ,)
_a : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
_a : Union[str, Any] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
_a : Any = True
if "*" in mapped_key:
_a : Optional[int] = name.split(__a )[0].split('''.''' )[-2]
_a : Any = mapped_key.replace('''*''' ,__a )
if "weight_g" in name:
_a : List[Any] = '''weight_g'''
elif "weight_v" in name:
_a : List[str] = '''weight_v'''
elif "weight" in name:
_a : Any = '''weight'''
elif "bias" in name:
_a : str = '''bias'''
else:
_a : Any = None
set_recursively(__a ,__a ,__a ,__a ,__a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __UpperCAmelCase ( __a : int ,__a : Optional[Any] ,__a : Dict ,__a : List[str] ,__a : Any ) -> Tuple:
"""simple docstring"""
_a : int = full_name.split('''conv_layers.''' )[-1]
_a : Any = name.split('''.''' )
_a : List[Any] = int(items[0] )
_a : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_a : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_a : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_a : int = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_a : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def __UpperCAmelCase ( __a : Dict ,__a : List[Any] ,__a : List[str]=None ,__a : Optional[int]=None ,__a : int=True ) -> List[Any]:
"""simple docstring"""
if config_path is not None:
_a : Tuple = HubertConfig.from_pretrained(__a )
else:
_a : Any = HubertConfig()
if is_finetuned:
if dict_path:
_a : Tuple = Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_a : Any = target_dict.pad_index
_a : Tuple = target_dict.bos_index
_a : Optional[int] = target_dict.eos_index
_a : Optional[Any] = len(target_dict.symbols )
_a : Tuple = os.path.join(__a ,'''vocab.json''' )
if not os.path.isdir(__a ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__a ) )
return
os.makedirs(__a ,exist_ok=__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices ,__a )
_a : Tuple = WavaVecaCTCTokenizer(
__a ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=__a ,)
_a : Tuple = True if config.feat_extract_norm == '''layer''' else False
_a : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__a ,return_attention_mask=__a ,)
_a : List[Any] = WavaVecaProcessor(feature_extractor=__a ,tokenizer=__a )
processor.save_pretrained(__a )
_a : Tuple = HubertForCTC(__a )
else:
_a : Tuple = HubertModel(__a )
if is_finetuned:
_a , _a , _a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_a , _a , _a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_a : Any = model[0].eval()
recursively_load_weights(__a ,__a ,__a )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Set this flag if the model to convert is not a fine-tuned model'''
)
a__ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 15 | 1 |
def __UpperCAmelCase ( __a : str ,__a : int ) -> str:
"""simple docstring"""
_a : list[list[str]] = [[] for _ in range(__a )]
_a : List[str] = key - 1
if key <= 0:
raise ValueError('''Height of grid can\'t be 0 or negative''' )
if key == 1 or len(__a ) <= key:
return input_string
for position, character in enumerate(__a ):
_a : str = position % (lowest * 2) # puts it in bounds
_a : Union[str, Any] = min(__a ,lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(__a )
_a : str = [''''''.join(__a ) for row in temp_grid]
_a : Dict = ''''''.join(__a )
return output_string
def __UpperCAmelCase ( __a : str ,__a : int ) -> str:
"""simple docstring"""
_a : List[str] = []
_a : Dict = key - 1
if key <= 0:
raise ValueError('''Height of grid can\'t be 0 or negative''' )
if key == 1:
return input_string
_a : list[list[str]] = [[] for _ in range(__a )] # generates template
for position in range(len(__a ) ):
_a : str = position % (lowest * 2) # puts it in bounds
_a : str = min(__a ,lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append('''*''' )
_a : Union[str, Any] = 0
for row in temp_grid: # fills in the characters
_a : int = input_string[counter : counter + len(__a )]
grid.append(list(__a ) )
counter += len(__a )
_a : str = '''''' # reads as zigzag
for position in range(len(__a ) ):
_a : List[Any] = position % (lowest * 2) # puts it in bounds
_a : Optional[int] = min(__a ,lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def __UpperCAmelCase ( __a : str ) -> dict[int, str]:
"""simple docstring"""
_a : Optional[Any] = {}
for key_guess in range(1 ,len(__a ) ): # tries every key
_a : Union[str, Any] = decrypt(__a ,__a )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = ["image_processor", "tokenizer"]
UpperCAmelCase__ : str = "ViltImageProcessor"
UpperCAmelCase__ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _a=None , _a=None , **_a ) -> Any:
_a : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
_a : Dict = kwargs.pop('''feature_extractor''' )
_a : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
_a : int = self.image_processor
def __call__( self , _a , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
_a : Tuple = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel_values + pixel_mask
_a : str = self.image_processor(_a , return_tensors=_a )
encoding.update(_a )
return encoding
def __lowercase ( self , *_a , **_a ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_a , **_a )
def __lowercase ( self , *_a , **_a ) -> str:
return self.tokenizer.decode(*_a , **_a )
@property
def __lowercase ( self ) -> Optional[int]:
_a : str = self.tokenizer.model_input_names
_a : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowercase ( self ) -> Optional[Any]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __lowercase ( self ) -> Any:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
| 15 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : Dict ,__a : Dict ,__a : Optional[int] ,__a : List[Any]=None ,__a : List[Any]=None ) -> str:
"""simple docstring"""
if "." in tensor_name:
_a : Optional[int] = tensor_name.split('''.''' )
for split in splits[:-1]:
_a : str = getattr(__a ,__a )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
_a : int = new_module
_a : Dict = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
_a : List[str] = tensor_name in module._buffers
_a : Union[str, Any] = getattr(__a ,__a )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
_a : Optional[int] = False
_a : Optional[int] = False
if is_buffer or not is_bitsandbytes_available():
_a : Optional[Any] = False
_a : Tuple = False
else:
_a : Dict = hasattr(bnb.nn ,'''Params4bit''' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
_a : Optional[int] = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
if is_abit or is_abit:
_a : List[Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
_a : str = old_value.to(__a )
elif isinstance(__a ,torch.Tensor ):
_a : List[str] = value.to('''cpu''' )
if value.dtype == torch.inta:
_a : List[Any] = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
_a : Dict = torch.tensor(__a ,device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls ,__a ) and fpaa_statistics is None:
_a : List[Any] = new_value.T
_a : Optional[int] = old_value.__dict__
if is_abit:
_a : Optional[int] = bnb.nn.IntaParams(__a ,requires_grad=__a ,**__a ).to(__a )
elif is_abit:
_a : int = bnb.nn.Paramsabit(__a ,requires_grad=__a ,**__a ).to(__a )
_a : Optional[Any] = new_value
if fpaa_statistics is not None:
setattr(module.weight ,'''SCB''' ,fpaa_statistics.to(__a ) )
else:
if value is None:
_a : Union[str, Any] = old_value.to(__a )
elif isinstance(__a ,torch.Tensor ):
_a : Union[str, Any] = value.to(__a )
else:
_a : int = torch.tensor(__a ,device=__a )
if is_buffer:
_a : Any = new_value
else:
_a : int = nn.Parameter(__a ,requires_grad=old_value.requires_grad )
_a : Optional[int] = new_value
def __UpperCAmelCase ( __a : List[Any] ,__a : str=None ,__a : Dict=None ,__a : int=None ,__a : List[str]=False ) -> List[Any]:
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
_a : str = []
current_key_name.append(__a )
if (isinstance(__a ,nn.Linear ) or isinstance(__a ,__a )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(__a ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(__a ,__a ):
_a , _a : Any = module.weight.shape
else:
_a : List[str] = module.in_features
_a : Dict = module.out_features
if quantization_config.quantization_method() == "llm_int8":
_a : int = bnb.nn.LinearabitLt(
__a ,__a ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,)
_a : Union[str, Any] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
_a : List[str] = bnb.nn.Linearabit(
__a ,__a ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,)
_a : Tuple = True
# Store the module class in case we need to transpose the weight later
_a : str = type(__a )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(__a )
if len(list(module.children() ) ) > 0:
_a , _a : Any = _replace_with_bnb_linear(
__a ,__a ,__a ,__a ,has_been_replaced=__a ,)
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __UpperCAmelCase ( __a : Optional[int] ,__a : Optional[Any]=None ,__a : str=None ,__a : List[Any]=None ) -> str:
"""simple docstring"""
_a : Tuple = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
_a , _a : Tuple = _replace_with_bnb_linear(
__a ,__a ,__a ,__a )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def __UpperCAmelCase ( *__a : int ,**__a : Dict ) -> Dict:
"""simple docstring"""
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' ,__a ,)
return replace_with_bnb_linear(*__a ,**__a )
def __UpperCAmelCase ( *__a : Union[str, Any] ,**__a : List[Any] ) -> str:
"""simple docstring"""
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' ,__a ,)
return set_module_quantized_tensor_to_device(*__a ,**__a )
def __UpperCAmelCase ( __a : List[Any] ) -> int:
"""simple docstring"""
    _a : int = deepcopy(__a ) # this has 0 cost since it is done inside `init_empty_weights` context manager
tied_model.tie_weights()
_a : Union[str, Any] = find_tied_parameters(__a )
# For compatibility with Accelerate < 0.18
if isinstance(__a ,__a ):
_a : Any = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
_a : str = sum(__a ,[] )
_a : Tuple = len(__a ) > 0
# Check if it is a base model
_a : Tuple = not hasattr(__a ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_a : List[Any] = list(model.named_children() )
_a : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
_a : Optional[Any] = set(__a ) - set(__a )
_a : Dict = list(set(__a ) ) + list(__a )
# remove ".weight" from the keys
_a : List[str] = ['''.weight''', '''.bias''']
_a : int = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_a : Any = name.replace(__a ,'''''' )
filtered_module_names.append(__a )
return filtered_module_names
| 15 |
from math import ceil
def __UpperCAmelCase ( __a : int = 1_001 ) -> int:
"""simple docstring"""
_a : Dict = 1
for i in range(1 ,int(ceil(n / 2.0 ) ) ):
_a : int = 2 * i + 1
_a : str = 2 * i
_a : Any = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
a__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 15 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : int = "transfo-xl"
UpperCAmelCase__ : str = ["mems"]
UpperCAmelCase__ : Union[str, Any] = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _a=2_6_7_7_3_5 , _a=[2_0_0_0_0, 4_0_0_0_0, 2_0_0_0_0_0] , _a=1_0_2_4 , _a=1_0_2_4 , _a=1_6 , _a=6_4 , _a=4_0_9_6 , _a=4 , _a=False , _a=1_8 , _a=1_6_0_0 , _a=1_0_0_0 , _a=True , _a=True , _a=0 , _a=-1 , _a=True , _a=0.1 , _a=0.0 , _a=True , _a="normal" , _a=0.01 , _a=0.01 , _a=0.02 , _a=1e-5 , _a=0 , **_a , ) -> Optional[int]:
_a : int = vocab_size
_a : int = []
self.cutoffs.extend(_a )
if proj_share_all_but_first:
_a : Union[str, Any] = [False] + [True] * len(self.cutoffs )
else:
_a : Optional[Any] = [False] + [False] * len(self.cutoffs )
_a : Optional[Any] = d_model
_a : Union[str, Any] = d_embed
_a : Optional[int] = d_head
_a : int = d_inner
_a : List[str] = div_val
_a : Union[str, Any] = pre_lnorm
_a : int = n_layer
_a : str = n_head
_a : List[Any] = mem_len
_a : str = same_length
_a : int = attn_type
_a : Tuple = clamp_len
_a : Optional[Any] = sample_softmax
_a : str = adaptive
_a : Optional[int] = dropout
_a : Tuple = dropatt
_a : str = untie_r
_a : Union[str, Any] = init
_a : Optional[Any] = init_range
_a : Optional[Any] = proj_init_std
_a : str = init_std
_a : List[str] = layer_norm_epsilon
super().__init__(eos_token_id=_a , **_a )
@property
def __lowercase ( self ) -> str:
# Message copied from Transformer-XL documentation
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def __lowercase ( self , _a ) -> Optional[int]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 15 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : str ,__a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def __UpperCAmelCase ( __a : np.ndarray ,__a : Optional[str] ,__a : Optional[str] ) -> List[Any]:
"""simple docstring"""
_a : str = to_pil_image(__a )
_a , _a : Optional[Any] = pil_image.size
_a : Tuple = pytesseract.image_to_data(__a ,lang=__a ,output_type='''dict''' ,config=__a )
_a , _a , _a , _a , _a : List[str] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
_a : Dict = [idx for idx, word in enumerate(__a ) if not word.strip()]
_a : str = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices]
_a : List[str] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : str = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
_a : Union[str, Any] = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_a : int = []
for x, y, w, h in zip(__a ,__a ,__a ,__a ):
_a : List[str] = [x, y, x + w, y + h]
actual_boxes.append(__a )
# finally, normalize the bounding boxes
_a : Dict = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(__a ,__a ,__a ) )
assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , _a = True , _a = None , _a = "" , **_a , ) -> None:
super().__init__(**_a )
_a : List[str] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_a : Union[str, Any] = get_size_dict(_a )
_a : int = do_resize
_a : Optional[int] = size
_a : str = resample
_a : str = do_rescale
_a : Any = rescale_value
_a : Optional[Any] = do_normalize
_a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
_a : List[Any] = apply_ocr
_a : Optional[int] = ocr_lang
_a : Tuple = tesseract_config
def __lowercase ( self , _a , _a , _a = PILImageResampling.BILINEAR , _a = None , **_a , ) -> np.ndarray:
_a : Any = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_a : Optional[int] = (size['''height'''], size['''width'''])
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a = None , **_a , ) -> np.ndarray:
return rescale(_a , scale=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __lowercase ( self , _a , _a = None , _a = None , _a=None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
_a : Optional[int] = do_resize if do_resize is not None else self.do_resize
_a : Union[str, Any] = size if size is not None else self.size
_a : Any = get_size_dict(_a )
_a : List[str] = resample if resample is not None else self.resample
_a : int = do_rescale if do_rescale is not None else self.do_rescale
_a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : int = do_normalize if do_normalize is not None else self.do_normalize
_a : str = image_mean if image_mean is not None else self.image_mean
_a : Tuple = image_std if image_std is not None else self.image_std
_a : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
_a : int = ocr_lang if ocr_lang is not None else self.ocr_lang
_a : Optional[int] = tesseract_config if tesseract_config is not None else self.tesseract_config
_a : List[Any] = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
_a : Any = [to_numpy_array(_a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
_a : str = []
_a : str = []
for image in images:
_a , _a : Union[str, Any] = apply_tesseract(_a , _a , _a )
words_batch.append(_a )
boxes_batch.append(_a )
if do_resize:
_a : List[str] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
_a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_a : List[Any] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_a : List[str] = [to_channel_dimension_format(_a , _a ) for image in images]
_a : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_a )
if apply_ocr:
_a : Optional[int] = words_batch
_a : List[Any] = boxes_batch
return data
| 15 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , _a , _a = None , _a = None , _a = True , _a = None , _a = False , _a = None , _a = True , _a = "arrow" , **_a , ) -> Union[str, Any]:
super().__init__(
split=_a , features=_a , cache_dir=_a , keep_in_memory=_a , streaming=_a , **_a , )
_a : Optional[int] = load_from_cache_file
_a : List[Any] = file_format
_a : Optional[int] = Spark(
df=_a , features=_a , cache_dir=_a , working_dir=_a , **_a , )
def __lowercase ( self ) -> Optional[Any]:
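        # Streaming mode returns an iterable dataset; otherwise download/prepare the
        # Spark dataframe (honoring the cache flag) and return a materialized dataset.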
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_a : Union[str, Any] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=_a , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : int = ArgumentParser('''Accelerate CLI tool''' ,usage='''accelerate <command> [<args>]''' ,allow_abbrev=__a )
_a : Optional[int] = parser.add_subparsers(help='''accelerate command helpers''' )
# Register commands
get_config_parser(subparsers=__a )
env_command_parser(subparsers=__a )
launch_command_parser(subparsers=__a )
tpu_command_parser(subparsers=__a )
test_command_parser(subparsers=__a )
# Let's go
_a : Dict = parser.parse_args()
if not hasattr(__a ,'''func''' ):
parser.print_help()
exit(1 )
# Run
args.func(__a )
if __name__ == "__main__":
main()
| 15 | 1 |
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
if n == 1 or not isinstance(__a ,__a ):
return 0
elif n == 2:
return 1
else:
_a : Any = [0, 1]
for i in range(2 ,n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def __UpperCAmelCase ( __a : int ) -> int:
"""simple docstring"""
_a : Any = 0
_a : Dict = 2
while digits < n:
index += 1
_a : Dict = len(str(fibonacci(__a ) ) )
return index
def __UpperCAmelCase ( __a : int = 1_000 ) -> int:
"""simple docstring"""
return fibonacci_digits_index(__a )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 15 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a__ = random.Random()
def __UpperCAmelCase ( __a : Tuple ,__a : str=1.0 ,__a : Optional[int]=None ,__a : List[Any]=None ) -> Any:
"""simple docstring"""
if rng is None:
_a : Dict = global_rng
_a : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _a , _a=7 , _a=4_0_0 , _a=2_0_0_0 , _a=2_0_4_8 , _a=1_2_8 , _a=1 , _a=5_1_2 , _a=3_0 , _a=4_4_1_0_0 , ) -> List[Any]:
_a : Optional[Any] = parent
_a : str = batch_size
_a : List[str] = min_seq_length
_a : str = max_seq_length
_a : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_a : List[Any] = spectrogram_length
_a : List[str] = feature_size
_a : List[Any] = num_audio_channels
_a : Tuple = hop_length
_a : Optional[int] = chunk_length
_a : int = sampling_rate
def __lowercase ( self ) -> Union[str, Any]:
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def __lowercase ( self , _a=False , _a=False ) -> List[Any]:
def _flatten(_a ):
return list(itertools.chain(*_a ) )
if equal_length:
_a : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_a : List[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_a : str = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = TvltFeatureExtractor
def __lowercase ( self ) -> Dict:
_a : List[str] = TvltFeatureExtractionTester(self )
def __lowercase ( self ) -> Any:
_a : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_a , '''feature_size''' ) )
self.assertTrue(hasattr(_a , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_a , '''hop_length''' ) )
self.assertTrue(hasattr(_a , '''chunk_length''' ) )
self.assertTrue(hasattr(_a , '''sampling_rate''' ) )
def __lowercase ( self ) -> Optional[int]:
_a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_a : Dict = self.feature_extraction_class.from_pretrained(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Union[str, Any] = feat_extract_second.to_dict()
_a : Any = dict_first.pop('''mel_filters''' )
_a : int = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Optional[int]:
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = os.path.join(_a , '''feat_extract.json''' )
feat_extract_first.to_json_file(_a )
_a : List[str] = self.feature_extraction_class.from_json_file(_a )
_a : List[Any] = feat_extract_first.to_dict()
_a : Dict = feat_extract_second.to_dict()
_a : str = dict_first.pop('''mel_filters''' )
_a : str = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
# Initialize feature_extractor
_a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_a : Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
_a : List[str] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_a : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_a : Dict = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_a : Union[str, Any] = feature_extractor(
_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_a : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_a : int = np.asarray(_a )
_a : Tuple = feature_extractor(_a , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_a : Optional[int] = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def __lowercase ( self ) -> int:
_a : Union[str, Any] = self._load_datasamples(1 )
_a : int = TvltFeatureExtractor()
_a : Union[str, Any] = feature_extractor(_a , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
_a : Union[str, Any] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1e-4 ) )
| 15 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a__ = 16
a__ = 32
def __UpperCAmelCase ( __a : Accelerator ,__a : int = 16 ,__a : str = "bert-base-cased" ) -> int:
"""simple docstring"""
_a : int = AutoTokenizer.from_pretrained(__a )
_a : Optional[Any] = load_dataset('''glue''' ,'''mrpc''' )
def tokenize_function(__a : List[str] ):
# max_length=None => use the model max length (it's actually the default)
_a : int = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=__a ,max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_a : Optional[Any] = datasets.map(
__a ,batched=__a ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,load_from_cache_file=__a )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_a : Union[str, Any] = tokenized_datasets.rename_column('''label''' ,'''labels''' )
def collate_fn(__a : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__a ,padding='''max_length''' ,max_length=128 ,return_tensors='''pt''' )
return tokenizer.pad(__a ,padding='''longest''' ,return_tensors='''pt''' )
# Instantiate dataloaders.
_a : Dict = DataLoader(
tokenized_datasets['''train'''] ,shuffle=__a ,collate_fn=__a ,batch_size=__a )
_a : List[str] = DataLoader(
tokenized_datasets['''validation'''] ,shuffle=__a ,collate_fn=__a ,batch_size=__a )
return train_dataloader, eval_dataloader
def __UpperCAmelCase ( __a : str ,__a : Tuple ) -> List[str]:
"""simple docstring"""
_a : Tuple = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a : Optional[int] = config['''lr''']
_a : Optional[int] = int(config['''num_epochs'''] )
_a : Optional[Any] = int(config['''seed'''] )
_a : Union[str, Any] = int(config['''batch_size'''] )
_a : List[str] = args.model_name_or_path
set_seed(__a )
_a , _a : List[Any] = get_dataloaders(__a ,__a ,__a )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a : str = AutoModelForSequenceClassification.from_pretrained(__a ,return_dict=__a )
# Instantiate optimizer
_a : Dict = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_a : Union[str, Any] = optimizer_cls(params=model.parameters() ,lr=__a )
if accelerator.state.deepspeed_plugin is not None:
_a : Dict = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
_a : str = 1
_a : Optional[Any] = (len(__a ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_a : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=__a ,num_warmup_steps=0 ,num_training_steps=__a ,)
else:
_a : Optional[int] = DummyScheduler(__a ,total_num_steps=__a ,warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a , _a , _a , _a , _a : str = accelerator.prepare(
__a ,__a ,__a ,__a ,__a )
# We need to keep track of how many total steps we have iterated over
_a : Union[str, Any] = 0
# We also need to keep track of the stating epoch so files are named properly
_a : Dict = 0
# Now we train the model
_a : Dict = evaluate.load('''glue''' ,'''mrpc''' )
_a : Union[str, Any] = 0
_a : int = {}
for epoch in range(__a ,__a ):
model.train()
for step, batch in enumerate(__a ):
_a : List[Any] = model(**__a )
_a : Any = outputs.loss
_a : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
_a : Dict = 0
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a : Union[str, Any] = model(**__a )
_a : Dict = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_a , _a : Tuple = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__a ) - 1:
_a : Any = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_a : List[str] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__a ,references=__a ,)
_a : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" ,__a )
_a : List[Any] = eval_metric['''accuracy''']
if best_performance < eval_metric["accuracy"]:
_a : Union[str, Any] = eval_metric['''accuracy''']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,'''all_results.json''' ) ,'''w''' ) as f:
json.dump(__a ,__a )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
_a : int = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' ,type=__a ,default='''bert-base-cased''' ,help='''Path to pretrained model or model identifier from huggingface.co/models.''' ,required=__a ,)
parser.add_argument(
'''--output_dir''' ,type=__a ,default='''.''' ,help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' ,)
parser.add_argument(
'''--performance_lower_bound''' ,type=__a ,default=__a ,help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' ,)
parser.add_argument(
'''--num_epochs''' ,type=__a ,default=3 ,help='''Number of train epochs.''' ,)
_a : Union[str, Any] = parser.parse_args()
_a : Optional[int] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(__a ,__a )
if __name__ == "__main__":
main()
| 15 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
a__ = logging.get_logger(__name__)
@add_end_docstrings(
__lowercase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self , _a ) -> np.ndarray:
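        # Locate the position(s) of the mask token in `input_ids` for the active framework.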
if self.framework == "tf":
_a : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_a : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __lowercase ( self , _a ) -> np.ndarray:
_a : int = self.get_masked_index(_a )
_a : Tuple = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def __lowercase ( self , _a ) -> Optional[int]:
if isinstance(_a , _a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_a )
def __lowercase ( self , _a , _a=None , **_a ) -> Dict[str, GenericTensor]:
if return_tensors is None:
_a : Union[str, Any] = self.framework
_a : str = self.tokenizer(_a , return_tensors=_a )
self.ensure_exactly_one_mask_token(_a )
return model_inputs
def __lowercase ( self , _a ) -> Optional[Any]:
_a : List[str] = self.model(**_a )
_a : Any = model_inputs['''input_ids''']
return model_outputs
def __lowercase ( self , _a , _a=5 , _a=None ) -> str:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
_a : List[Any] = target_ids.shape[0]
_a : Any = model_outputs['''input_ids'''][0]
_a : List[str] = model_outputs['''logits''']
if self.framework == "tf":
_a : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_a : List[str] = outputs.numpy()
_a : Dict = outputs[0, masked_index, :]
_a : str = stable_softmax(_a , axis=-1 )
if target_ids is not None:
_a : Any = tf.gather_nd(tf.squeeze(_a , 0 ) , target_ids.reshape(-1 , 1 ) )
_a : Union[str, Any] = tf.expand_dims(_a , 0 )
_a : Optional[int] = tf.math.top_k(_a , k=_a )
_a , _a : Optional[Any] = topk.values.numpy(), topk.indices.numpy()
else:
_a : Optional[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_a : List[str] = outputs[0, masked_index, :]
_a : List[Any] = logits.softmax(dim=-1 )
if target_ids is not None:
_a : List[Any] = probs[..., target_ids]
_a , _a : Optional[Any] = probs.topk(_a )
_a : Dict = []
_a : List[Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
_a : Optional[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
_a : Optional[int] = input_ids.numpy().copy()
if target_ids is not None:
_a : Tuple = target_ids[p].tolist()
_a : List[str] = p
# Filter padding out:
_a : List[Any] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_a : List[str] = self.tokenizer.decode(_a , skip_special_tokens=_a )
_a : List[Any] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_a )
result.append(_a )
if single_mask:
return result[0]
return result
def __lowercase ( self , _a , _a=None ) -> Dict:
if isinstance(_a , _a ):
_a : Tuple = [targets]
try:
_a : int = self.tokenizer.get_vocab()
except Exception:
_a : Any = {}
_a : List[Any] = []
for target in targets:
_a : List[Any] = vocab.get(_a , _a )
if id_ is None:
_a : Tuple = self.tokenizer(
_a , add_special_tokens=_a , return_attention_mask=_a , return_token_type_ids=_a , max_length=1 , truncation=_a , )['''input_ids''']
if len(_a ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
_a : Tuple = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
_a : List[str] = list(set(_a ) )
if len(_a ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
_a : int = np.array(_a )
return target_ids
def __lowercase ( self , _a=None , _a=None ) -> Tuple:
_a : str = {}
if targets is not None:
_a : List[Any] = self.get_target_ids(_a , _a )
_a : Optional[Any] = target_ids
if top_k is not None:
_a : Union[str, Any] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , _a , *_a , **_a ) -> int:
_a : Optional[Any] = super().__call__(_a , **_a )
if isinstance(_a , _a ) and len(_a ) == 1:
return outputs[0]
return outputs
| 15 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 15 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
_a : Any = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_a : Dict = parser.parse_args()
return args.f
def __UpperCAmelCase ( __a : Optional[int] ,__a : List[str]="eval" ) -> Any:
"""simple docstring"""
_a : Any = os.path.join(__a ,F"""{split}_results.json""" )
if os.path.exists(__a ):
with open(__a ,'''r''' ) as f:
return json.load(__a )
raise ValueError(F"""can't find {path}""" )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
_a : Any = self.get_auto_remove_tmp_dir()
_a : Optional[Any] = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_glue.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __lowercase ( self ) -> Dict:
_a : Tuple = self.get_auto_remove_tmp_dir()
_a : Tuple = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_clm_flax.main()
_a : List[str] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 1_0_0 )
@slow
def __lowercase ( self ) -> Optional[int]:
_a : str = self.get_auto_remove_tmp_dir()
_a : Optional[int] = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(_a , '''argv''' , _a ):
run_summarization_flax.main()
_a : Optional[int] = get_results(_a , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __lowercase ( self ) -> Tuple:
_a : List[str] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 4_2 )
@slow
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = self.get_auto_remove_tmp_dir()
_a : int = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
            run_t5_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __lowercase ( self ) -> Optional[Any]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
_a : Any = 7 if get_gpu_count() > 1 else 2
_a : List[Any] = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_ner.main()
_a : Dict = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __lowercase ( self ) -> Any:
_a : Optional[int] = self.get_auto_remove_tmp_dir()
_a : Union[str, Any] = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_qa.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
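# Each test above follows the same recipe: build a CLI argument list, patch
# `sys.argv`, call the example script's `main()`, then read the
# `{split}_results.json` it wrote and assert on the metrics. A minimal
# sketch of that harness with a stand-in `main` (not one of the real
# example scripts):
import json
import os
import sys
import tempfile
from unittest.mock import patch


def fake_main():
    # Pretend to train and write an eval results file, like the examples do.
    out_dir = sys.argv[sys.argv.index("--output_dir") + 1]
    with open(os.path.join(out_dir, "eval_results.json"), "w") as f:
        json.dump({"eval_accuracy": 0.8}, f)


with tempfile.TemporaryDirectory() as tmp_dir:
    testargs = f"run_fake.py --output_dir {tmp_dir}".split()
    with patch.object(sys, "argv", testargs):
        fake_main()
    with open(os.path.join(tmp_dir, "eval_results.json")) as f:
        result = json.load(f)
    assert result["eval_accuracy"] >= 0.75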
| 15 | 1 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''''''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : List[str] ) -> Optional[int]:
"""simple docstring"""
assert ReadMe.from_string(__a ,__a ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def __UpperCAmelCase ( __a : List[str] ,__a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ):
_a : List[Any] = ReadMe.from_string(__a ,__a )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Dict ,__a : Dict ) -> Tuple:
"""simple docstring"""
with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ):
ReadMe.from_string(__a ,__a )
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
ReadMe.from_string(__a ,__a ,suppress_parsing_errors=__a )
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Any ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : Tuple = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : Optional[Any] = ReadMe.from_readme(__a ,__a ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def __UpperCAmelCase ( __a : List[Any] ,__a : List[Any] ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : int = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : Optional[int] = expected_error.format(path=__a )
with pytest.raises(__a ,match=re.escape(__a ) ):
_a : Any = ReadMe.from_readme(__a ,__a )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : str ,__a : Union[str, Any] ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : Optional[Any] = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : str = expected_error.format(path=__a )
with pytest.raises(__a ,match=re.escape(__a ) ):
ReadMe.from_readme(__a ,__a )
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Optional[Any] ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : int = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
ReadMe.from_readme(__a ,__a ,suppress_parsing_errors=__a )
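# The error strings above distinguish READMEs with no YAML markers, empty
# markers, and an unterminated marker. A small sketch of that front-matter
# classification, independent of `datasets.utils.readme`:
def classify_yaml_markers(readme_text: str) -> str:
    lines = readme_text.splitlines()
    marker_indices = [i for i, line in enumerate(lines) if line.strip() == "---"]
    if not marker_indices:
        return "no YAML markers"
    if len(marker_indices) == 1:
        return "only the start of YAML tags present"
    start, end = marker_indices[0], marker_indices[1]
    if not any(line.strip() for line in lines[start + 1 : end]):
        return "empty YAML markers"
    return "valid front matter"


assert classify_yaml_markers("# Title\n") == "no YAML markers"
assert classify_yaml_markers("---\n# Title\n") == "only the start of YAML tags present"
assert classify_yaml_markers("---\n---\n# Title\n") == "empty YAML markers"
assert classify_yaml_markers("---\nlanguage: en\n---\n# Title\n") == "valid front matter"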
| 15 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = '''examples/'''
REPLACE_PATTERNS = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def __UpperCAmelCase ( __a : List[str] ,__a : int ,__a : Optional[Any] ) -> int:
"""simple docstring"""
with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Tuple = f.read()
_a , _a : str = REPLACE_PATTERNS[pattern]
_a : List[str] = replace.replace('''VERSION''' ,__a )
_a : List[Any] = re_pattern.sub(__a ,__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.write(__a )
def __UpperCAmelCase ( __a : Any ) -> List[Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(__a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__a ,__a ) ,__a ,pattern='''examples''' )
def __UpperCAmelCase ( __a : List[Any] ,__a : List[str]=False ) -> int:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__a ,__a ,__a )
if not patch:
update_version_in_examples(__a )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
with open(__a ,'''r''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
_a : Optional[int] = f.readlines()
# Find the start of the list.
_a : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_a : List[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
_a : Tuple = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' ,'''https://huggingface.co/docs/transformers/model_doc''' ,)
index += 1
with open(__a ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.writelines(__a )
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
with open(REPLACE_FILES['''init'''] ,'''r''' ) as f:
_a : Optional[Any] = f.read()
_a : Optional[Any] = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0]
return packaging.version.parse(__a )
def __UpperCAmelCase ( __a : Dict=False ) -> str:
"""simple docstring"""
_a : Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
_a : List[Any] = default_version.base_version
elif patch:
_a : str = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_a : List[str] = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_a : Dict = input(F"""Which version are you releasing? [{default_version}]""" )
if len(__a ) == 0:
_a : int = default_version
print(F"""Updating version to {version}.""" )
global_version_update(__a ,patch=__a )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
_a : str = get_version()
_a : int = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_a : List[Any] = current_version.base_version
# Check with the user we got that right.
_a : Union[str, Any] = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(__a ) == 0:
_a : List[str] = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(__a )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
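# The REPLACE_PATTERNS machinery above is just a compiled regex plus a
# template with a VERSION placeholder. A quick self-contained check of the
# `init` pattern on a sample file body:
import re

sample = '__version__ = "4.31.0.dev0"\n'
pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)

assert pattern.search(sample).groups()[0] == "4.31.0.dev0"
bumped = pattern.sub('__version__ = "4.31.0"', sample)
assert bumped.strip() == '__version__ = "4.31.0"'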
| 15 | 1 |
import sys
import turtle
def get_mid(pa: tuple[float, float] ,pb: tuple[float, float] ) -> tuple[float, float]:
    """simple docstring"""
    # Midpoint of two points: average the x and y coordinates.
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertexa: tuple[float, float] ,vertexb: tuple[float, float] ,vertexc: tuple[float, float] ,depth: int ,) -> None:
    """simple docstring"""
    my_pen.up()
    my_pen.goto(vertexa[0] ,vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] ,vertexb[1] )
    my_pen.goto(vertexc[0] ,vertexc[1] )
    my_pen.goto(vertexa[0] ,vertexa[1] )
    if depth == 0:
        return
    # Recurse into the three corner sub-triangles.
    triangle(vertexa ,get_mid(vertexa ,vertexb ) ,get_mid(vertexa ,vertexc ) ,depth - 1 )
    triangle(vertexb ,get_mid(vertexa ,vertexb ) ,get_mid(vertexb ,vertexc ) ,depth - 1 )
    triangle(vertexc ,get_mid(vertexa ,vertexc ) ,get_mid(vertexb ,vertexc ) ,depth - 1 )
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            '''Correct format for using this script: '''
            '''python fractals.py <int:depth_for_fractal>'''
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
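# Drawing aside, the recursion above has a simple shape: each call draws one
# triangle outline and, while depth > 0, recurses three times with depth - 1.
# A sketch that counts the outlines instead of drawing them, showing the
# cost grow as (3**(depth + 1) - 1) / 2:
def count_triangles(depth: int) -> int:
    if depth == 0:
        return 1
    return 1 + 3 * count_triangles(depth - 1)


for demo_depth in range(5):
    assert count_triangles(demo_depth) == (3 ** (demo_depth + 1) - 1) // 2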
| 15 |
def fibonacci(n: int ) -> int:
    """simple docstring"""
    if not isinstance(n ,int ) or n == 1:
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 ,n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index(n: int ) -> int:
    """simple docstring"""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution(n: int = 1_000 ) -> int:
    """simple docstring"""
    return fibonacci_digits_index(n )
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
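# Rebuilding the whole sequence for every candidate index (as
# fibonacci_digits_index does above) is O(n^2); two running values are
# enough. A constant-memory sketch of the same search:
def fibonacci_digits_index_fast(n: int) -> int:
    previous, current = 1, 1  # sequence values at indices 1 and 2 above
    index = 2
    while len(str(current)) < n:
        previous, current = current, previous + current
        index += 1
    return index


assert fibonacci_digits_index_fast(3) == 12  # 144 is the first 3-digit term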
| 15 | 1 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = ["vqvae"]
def __init__( self , _a , _a , _a , _a , ) -> Any:
super().__init__()
self.register_modules(unet=_a , scheduler=_a , mel=_a , vqvae=_a )
def __lowercase ( self ) -> int:
return 5_0 if isinstance(self.scheduler , _a ) else 1_0_0_0
@torch.no_grad()
def __call__( self , _a = 1 , _a = None , _a = None , _a = 0 , _a = 0 , _a = None , _a = None , _a = 0 , _a = 0 , _a = None , _a = 0 , _a = None , _a = None , _a=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
_a : Optional[Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_a )
_a : str = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_a : Optional[Any] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_a : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_a , device=self.device , )
_a : str = noise
_a : Dict = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_a , _a )
_a : Any = self.mel.audio_slice_to_image(_a )
_a : Tuple = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
_a : Optional[Any] = (input_image / 2_5_5) * 2 - 1
_a : List[str] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_a : List[str] = self.vqvae.encode(torch.unsqueeze(_a , 0 ) ).latent_dist.sample(
generator=_a )[0]
_a : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_a : List[str] = self.scheduler.add_noise(_a , _a , self.scheduler.timesteps[start_step - 1] )
_a : List[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_a : List[Any] = int(mask_start_secs * pixels_per_second )
_a : int = int(mask_end_secs * pixels_per_second )
_a : Any = self.scheduler.add_noise(_a , _a , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _a ):
_a : Tuple = self.unet(_a , _a , _a )['''sample''']
else:
_a : Tuple = self.unet(_a , _a )['''sample''']
if isinstance(self.scheduler , _a ):
_a : Tuple = self.scheduler.step(
model_output=_a , timestep=_a , sample=_a , eta=_a , generator=_a , )['''prev_sample''']
else:
_a : int = self.scheduler.step(
model_output=_a , timestep=_a , sample=_a , generator=_a , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
_a : Tuple = mask[:, step, :, :mask_start]
if mask_end > 0:
_a : int = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_a : Dict = 1 / self.vqvae.config.scaling_factor * images
_a : Optional[Any] = self.vqvae.decode(_a )['''sample''']
_a : Optional[Any] = (images / 2 + 0.5).clamp(0 , 1 )
_a : str = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
_a : Optional[int] = (images * 2_5_5).round().astype('''uint8''' )
_a : List[str] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_a , mode='''RGB''' ).convert('''L''' ) for _ in images) )
_a : List[str] = [self.mel.image_to_audio(_a ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) , **ImagePipelineOutput(_a ) )
@torch.no_grad()
def __lowercase ( self , _a , _a = 5_0 ) -> np.ndarray:
assert isinstance(self.scheduler , _a )
self.scheduler.set_timesteps(_a )
_a : List[str] = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
_a : int = (sample / 2_5_5) * 2 - 1
_a : List[Any] = torch.Tensor(_a ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
_a : Dict = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_a : List[Any] = self.scheduler.alphas_cumprod[t]
_a : List[str] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_a : List[Any] = 1 - alpha_prod_t
_a : int = self.unet(_a , _a )['''sample''']
_a : Tuple = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_a : List[str] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_a : Optional[Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowercase ( _a , _a , _a ) -> torch.Tensor:
_a : Dict = acos(torch.dot(torch.flatten(_a ) , torch.flatten(_a ) ) / torch.norm(_a ) / torch.norm(_a ) )
return sin((1 - alpha) * theta ) * xa / sin(_a ) + sin(alpha * theta ) * xa / sin(_a )
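# The static method above is spherical linear interpolation (slerp) between
# two latent tensors. A numpy sketch of the same formula, checked at the
# endpoints (a standalone illustration, not the pipeline's torch version):
import numpy as np


def slerp(x0: np.ndarray, x1: np.ndarray, alpha: float) -> np.ndarray:
    theta = np.arccos(np.dot(x0.ravel(), x1.ravel()) / (np.linalg.norm(x0) * np.linalg.norm(x1)))
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)


rng = np.random.default_rng(0)
a, b = rng.normal(size=4), rng.normal(size=4)
assert np.allclose(slerp(a, b, 0.0), a)
assert np.allclose(slerp(a, b, 1.0), b)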
| 15 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy( preds ,labels ) -> Optional[Any]:
"""simple docstring"""
return float((preds == labels).mean() )
def acc_and_f1( preds ,labels ,f1_avg="binary" ) -> Optional[int]:
    """simple docstring"""
    acc = simple_accuracy(preds ,labels )
    f1 = float(f1_score(y_true=labels ,y_pred=preds ,average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc( ids_preds ,labels ) -> List[Any]:
    """simple docstring"""
    question_map = {}
    for id_pred, label in zip(ids_preds ,labels ):
        question_id = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
        pred = id_pred['''prediction''']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels ,y_pred=question_preds ,average='''macro''' )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels ,y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
def __lowercase ( self ) -> Any:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
def __lowercase ( self , _a , _a ) -> Optional[Any]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_a , _a )}
elif self.config_name == "cb":
            return acc_and_f1(_a , _a , f1_avg='''macro''' )
elif self.config_name == "record":
_a : Any = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
_a : Any = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
return evaluate_record(_a , _a )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_a , _a )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_a , _a )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 15 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutlmv3_fast'''] = ['''LayoutLMv3TokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_layoutlmv3'''] = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_layoutlmv3'''] = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_layoutlmv3'''] = ['''LayoutLMv3FeatureExtractor''']
    _import_structure['''image_processing_layoutlmv3'''] = ['''LayoutLMv3ImageProcessor''']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 15 |
import numpy as np
def power_iteration(input_matrix: np.ndarray ,vector: np.ndarray ,error_tol: float = 1e-12 ,max_iterations: int = 100 ,) -> tuple[float, np.ndarray]:
    """simple docstring"""
    # Ensure the matrix is square.
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix ,input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix ,vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h ,np.dot(input_matrix ,vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
    return lambda_, vector
def test_power_iteration() -> None:
    """simple docstring"""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    real_vector = np.array([41, 4, 20] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1j * complex_input_matrix ,1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix ,vector )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1e-6
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    test_power_iteration()
| 15 | 1 |
def logical_left_shift(number: int ,shift_amount: int ) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int ,shift_amount: int ) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int ,shift_amount: int ) -> str:
    """simple docstring"""
    if number >= 0:  # Get binary representation of positive number
        binary_number = '''0''' + str(bin(number ) ).strip('''-''' )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length ) )[3:]
        binary_number = (
            '''1''' + '''0''' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
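# For non-negative inputs the logical shifts above agree with Python's
# native << and >> operators; a quick cross-check (int() accepts the
# "0b" prefix when given base 2):
for number in (0, 1, 5, 17, 1983):
    for shift in (0, 1, 3, 8):
        assert int(logical_left_shift(number, shift), 2) == number << shift
        assert int(logical_right_shift(number, shift), 2) == number >> shift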
| 15 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCAmelCase_ ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCAmelCase__ : Optional[datasets.Features] = None
class UpperCAmelCase_ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCAmelCase__ : Any = PandasConfig
def __lowercase ( self ) -> Any:
return datasets.DatasetInfo(features=self.config.features )
def __lowercase ( self , _a ) -> List[Any]:
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_a : str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_a , (str, list, tuple) ):
_a : Dict = data_files
if isinstance(_a , _a ):
_a : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : int = [dl_manager.iter_files(_a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
_a : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(_a , _a ):
_a : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_a : Any = [dl_manager.iter_files(_a ) for file in files]
splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={'''files''': files} ) )
return splits
def __lowercase ( self , _a ) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_a : Optional[Any] = table_cast(_a , self.config.features.arrow_schema )
return pa_table
def __lowercase ( self , _a ) -> List[str]:
for i, file in enumerate(itertools.chain.from_iterable(_a ) ):
with open(_a , '''rb''' ) as f:
_a : str = pa.Table.from_pandas(pd.read_pickle(_a ) )
yield i, self._cast_table(_a )
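# The builder above just streams pickled DataFrames into Arrow tables. A
# minimal round trip showing the same conversion path (pandas pickle ->
# pyarrow Table), with throwaway demo data:
import os
import tempfile

import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"text": ["a", "b"], "label": [0, 1]})
with tempfile.TemporaryDirectory() as tmp_dir:
    path = os.path.join(tmp_dir, "data.pkl")
    df.to_pickle(path)
    table = pa.Table.from_pandas(pd.read_pickle(path))
print(table.schema)  # text: string, label: int64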
| 15 | 1 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/transformers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
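# The triage rules above are plain datetime arithmetic. A sketch of the
# "close after 7+ days of inactivity on an issue at least 30 days old"
# predicate on naive datetimes (no GitHub API involved):
from datetime import datetime, timedelta


def should_close(updated_at: datetime, created_at: datetime, now: datetime) -> bool:
    return (now - updated_at).days > 7 and (now - created_at).days >= 30


now = datetime(2023, 7, 1)
assert should_close(now - timedelta(days=10), now - timedelta(days=40), now)
assert not should_close(now - timedelta(days=2), now - timedelta(days=40), now)
assert not should_close(now - timedelta(days=10), now - timedelta(days=20), now)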
| 15 |
def _modexpt(base: int ,exponent: int ,modulo_value: int ) -> int:
    """simple docstring"""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base ,exponent // 2 ,modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base ,exponent - 1 ,modulo_value )) % modulo_value
def solution(base: int = 1_777 ,height: int = 1_855 ,digits: int = 8 ) -> int:
    """simple docstring"""
    # Iterate base^result with modular exponentiation to get the last
    # `digits` digits of the tetration base↑↑height.
    result = base
    for _ in range(1 ,height ):
        result = _modexpt(base ,result ,10**digits )
    return result
if __name__ == "__main__":
    print(f'''{solution() = }''')
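# Python's three-argument pow() performs the same modular exponentiation as
# _modexpt above; a quick agreement check:
for check_base, check_exp, check_mod in ((3, 5, 7), (1777, 1855, 10**8), (2, 100, 997)):
    assert _modexpt(check_base, check_exp, check_mod) == pow(check_base, check_exp, check_mod)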
| 15 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict( config ,input_ids ,attention_mask=None ,head_mask=None ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids ,config.pad_token_id ) ,tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class UpperCAmelCase_ :
"""simple docstring"""
UpperCAmelCase__ : List[Any] = OPTConfig
UpperCAmelCase__ : Dict = {}
UpperCAmelCase__ : Tuple = "gelu"
def __init__( self , _a , _a=1_3 , _a=7 , _a=True , _a=False , _a=9_9 , _a=1_6 , _a=2 , _a=4 , _a=4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=2_0 , _a=2 , _a=1 , _a=0 , _a=1_6 , _a=1_6 , ) -> Union[str, Any]:
_a : List[str] = parent
_a : Tuple = batch_size
_a : List[str] = seq_length
_a : Optional[Any] = is_training
_a : List[str] = use_labels
_a : Optional[int] = vocab_size
_a : Optional[Any] = hidden_size
_a : List[Any] = num_hidden_layers
_a : Any = num_attention_heads
_a : int = intermediate_size
_a : List[str] = hidden_act
_a : Optional[Any] = hidden_dropout_prob
_a : Optional[Any] = attention_probs_dropout_prob
_a : Optional[Any] = max_position_embeddings
_a : Tuple = eos_token_id
_a : Dict = pad_token_id
_a : int = bos_token_id
_a : Optional[int] = embed_dim
_a : Dict = word_embed_proj_dim
_a : Tuple = False
def __lowercase ( self ) -> int:
_a : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_a : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_a : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
_a : Dict = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_a , **self.config_updates , )
_a : Optional[Any] = prepare_opt_inputs_dict(_a , _a )
return config, inputs_dict
def __lowercase ( self , _a , _a ) -> Union[str, Any]:
_a : int = TFOPTModel(config=_a )
_a : int = inputs_dict['''input_ids''']
_a : Optional[int] = input_ids[:1, :]
_a : Optional[Any] = inputs_dict['''attention_mask'''][:1, :]
_a : Union[str, Any] = 1
# first forward pass
_a : Any = model(_a , attention_mask=_a , use_cache=_a )
_a , _a : Optional[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_a : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_a : str = tf.concat([input_ids, next_tokens] , axis=-1 )
_a : Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_a : str = model(_a , attention_mask=_a )[0]
_a : List[str] = model(_a , attention_mask=_a , past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_a : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_a : str = output_from_no_past[:, -3:, random_slice_idx]
_a : int = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a , _a , rtol=1e-3 )
@require_tf
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
UpperCAmelCase__ : Union[str, Any] = (TFOPTForCausalLM,) if is_tf_available() else ()
UpperCAmelCase__ : Tuple = (
{"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
)
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Any = 10
def __lowercase ( self ) -> int:
_a : str = TFOPTModelTester(self )
_a : int = ConfigTester(self , config_class=_a )
def __lowercase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def __lowercase ( self ) -> Optional[int]:
_a : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
def __lowercase ( self ) -> Tuple:
_a , _a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(_a , _a ):
if hasattr(_a , '''weight''' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(_a , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
_a : List[str] = model_class(config=_a )
_a : List[str] = _get_word_embedding_weight(_a , model.get_input_embeddings() )
_a : Union[str, Any] = _get_word_embedding_weight(_a , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(_a )
_a : List[Any] = _get_word_embedding_weight(_a , model.get_input_embeddings() )
_a : List[str] = _get_word_embedding_weight(_a , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_a : Union[str, Any] = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , _a )
# check that weights remain the same after resizing
_a : Any = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_a : Union[str, Any] = False
self.assertTrue(_a )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , _a )
_a : List[Any] = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_a : Optional[Any] = False
self.assertTrue(_a )
def _long_tensor( tok_lst ):
    """simple docstring"""
    return tf.constant(tok_lst ,dtype=tf.int64 )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = 99
def __lowercase ( self ) -> List[str]:
_a : Dict = tf.ones((4, 1) , dtype=tf.intaa ) * 2
_a : Optional[Any] = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_a : str = input_ids.shape[0]
_a : Tuple = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self ) -> List[Any]:
_a : int = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
_a : Tuple = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
_a : List[Any] = tf.not_equal(_a , model.config.pad_token_id )
with tf.GradientTape():
_a : Dict = model(input_ids=_a , attention_mask=_a ).last_hidden_state
_a : Union[str, Any] = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , _a )
_a : Tuple = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=4e-3 ) )
_a : Optional[int] = tf.function(_a , jit_compile=_a )
_a : Dict = xla_generate(_a , _a )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=4e-2 ) )
@require_tf
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Optional[Any]:
super().setUp()
_a : Dict = '''facebook/opt-350m'''
def __lowercase ( self ) -> List[str]:
_a : str = TFOPTForCausalLM.from_pretrained(self.path_model )
_a : Tuple = GPTaTokenizer.from_pretrained(self.path_model )
_a : str = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_a : Any = tokenizer(_a , return_tensors='''tf''' , padding=_a , add_special_tokens=_a )
_a : Union[str, Any] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_a : Dict = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(_a , _a , atol=1e-4 ) )
_a : str = tf.function(_a , jit_compile=_a )
_a : Optional[int] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(_a , _a , atol=1e-4 ) )
@require_tf
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowercase ( self ) -> Any:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def __lowercase ( self ) -> str:
_a : List[Any] = '''facebook/opt-125m'''
_a : Optional[Any] = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
_a : str = []
_a : List[Any] = GPTaTokenizer.from_pretrained(_a )
_a : Tuple = TFOPTForCausalLM.from_pretrained(_a )
for prompt in self.prompts:
_a : Union[str, Any] = tokenizer(_a , return_tensors='''tf''' ).input_ids
_a : Union[str, Any] = model.generate(_a , max_length=1_0 )
_a : str = tokenizer.batch_decode(_a , skip_special_tokens=_a )
predicted_outputs += generated_string
self.assertListEqual(_a , _a )
def __lowercase ( self ) -> Tuple:
_a : Any = '''facebook/opt-350m'''
_a : Any = GPTaTokenizer.from_pretrained(_a )
_a : Any = TFOPTForCausalLM.from_pretrained(_a )
_a : Dict = '''left'''
# use different length sentences to test batching
_a : Dict = [
'''Hello, my dog is a little''',
'''Today, I''',
]
_a : int = tokenizer(_a , return_tensors='''tf''' , padding=_a )
_a : List[str] = inputs['''input_ids''']
_a : Optional[Any] = model.generate(input_ids=_a , attention_mask=inputs['''attention_mask'''] )
_a : Any = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
_a : Tuple = model.generate(input_ids=_a )
_a : str = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
_a : Optional[int] = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
_a : Tuple = model.generate(input_ids=_a , max_length=model.config.max_length - num_paddings )
_a : str = tokenizer.batch_decode(_a , skip_special_tokens=_a )
_a : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_a )
_a : Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=_a )
_a : List[str] = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , [non_padded_sentence, padded_sentence] )
def __lowercase ( self ) -> str:
_a : Union[str, Any] = '''facebook/opt-350m'''
_a : Optional[Any] = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
_a : List[Any] = []
_a : Any = GPTaTokenizer.from_pretrained(_a )
_a : Any = TFOPTForCausalLM.from_pretrained(_a )
for prompt in self.prompts:
_a : str = tokenizer(_a , return_tensors='''tf''' ).input_ids
_a : List[Any] = model.generate(_a , max_length=1_0 )
_a : List[str] = tokenizer.batch_decode(_a , skip_special_tokens=_a )
predicted_outputs += generated_string
self.assertListEqual(_a , _a )
| 15 |
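# --- Illustrative aside (not part of the test row above; toy tensors, no model) ---
# The check_decoder_model_past_large_inputs pattern at the top of this section compares
# cached and uncached decoding only on the newly generated positions and only along a
# random hidden-dimension slice, since exact full-tensor equality is too strict under
# floating-point accumulation. A minimal, model-free sketch of the same comparison:
import tensorflow as tf

tf.random.set_seed(0)
output_from_no_past = tf.random.normal((2, 8, 16))   # stand-in for full-sequence logits
output_from_past = output_from_no_past[:, -3:, :]    # pretend the cached run reproduced the last 3 steps
random_slice_idx = 7
tf.debugging.assert_near(
    output_from_no_past[:, -3:, random_slice_idx],
    output_from_past[:, :, random_slice_idx],
    rtol=1e-3,
)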
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
a__ = '''\
'''
a__ = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
a__ = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def __lowercase ( self , _a , _a , _a = 1_6 , _a = True , _a=None ) -> List[Any]:
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
_a : List[str] = '''cuda'''
else:
_a : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
_a : Dict = AutoModelForCausalLM.from_pretrained(_a )
_a : List[Any] = model.to(_a )
_a : List[str] = AutoTokenizer.from_pretrained(_a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_a : str = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_a : List[Any] = model.config.max_length - 1
else:
_a : List[str] = model.config.max_length
_a : Union[str, Any] = tokenizer(
_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a )
_a : List[Any] = encodings['''input_ids''']
_a : int = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_a : Optional[int] = []
_a : Dict = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ):
_a : Dict = min(start_index + batch_size , len(_a ) )
_a : Union[str, Any] = encoded_texts[start_index:end_index]
_a : int = attn_masks[start_index:end_index]
if add_start_token:
_a : Dict = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a )
_a : List[str] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
_a : Dict = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 )
_a : Dict = encoded_batch
with torch.no_grad():
_a : Any = model(_a , attention_mask=_a ).logits
_a : List[str] = out_logits[..., :-1, :].contiguous()
_a : Union[str, Any] = labels[..., 1:].contiguous()
_a : Optional[int] = attn_mask[..., 1:].contiguous()
_a : Union[str, Any] = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
| 15 | 1 |
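# --- Worked example (illustrative only; the probabilities below are hypothetical) ---
# The metric above computes perplexity as the exponentiated average negative
# log-likelihood of a sequence. Done by hand for four tokens:
import numpy as np

token_probs = np.array([0.25, 0.1, 0.5, 0.05])   # hypothetical p(x_i | x_<i) from a model
nll = -np.log(token_probs)                        # per-token negative log-likelihood
perplexity = float(np.exp(nll.mean()))            # exp of the mean NLL, ~6.3 here
print(perplexity)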
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 15 |
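# --- Minimal sketch of the lazy-import pattern used above (illustrative) ---
# _LazyModule defers heavy submodule imports until an attribute is first accessed.
# The transformers implementation additionally handles relative imports, __all__,
# pickling and nicer error messages; this stripped-down analogue uses absolute
# imports so it runs standalone. The names LazyModule/"demo" are made up here.
import importlib
import types

class LazyModule(types.ModuleType):
    """Defer module imports until an attribute is first touched."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the module that provides it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(self._name_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value

lazy = LazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"lazy": True}))  # json is imported only at this point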
| 15 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
a__ = Mapping[str, np.ndarray]
a__ = Mapping[str, Any] # Is a nested dict.
a__ = 0.01
@dataclasses.dataclass(frozen=__lowercase )
class UpperCAmelCase_ :
"""simple docstring"""
UpperCAmelCase__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
UpperCAmelCase__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
UpperCAmelCase__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
UpperCAmelCase__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
UpperCAmelCase__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
UpperCAmelCase__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
UpperCAmelCase__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
UpperCAmelCase__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
UpperCAmelCase__ : Optional[Sequence[int]] = None
def __UpperCAmelCase ( __a : str ) -> Protein:
"""simple docstring"""
_a : int = R'''(\[[A-Z]+\]\n)'''
_a : List[str] = [tag.strip() for tag in re.split(__a ,__a ) if len(__a ) > 0]
_a : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] ,[l.split('''\n''' ) for l in tags[1::2]] )
_a : List[str] = ["N", "CA", "C"]
_a : Tuple = None
_a : int = None
_a : int = None
for g in groups:
if "[PRIMARY]" == g[0]:
_a : Any = g[1][0].strip()
for i in range(len(__a ) ):
if seq[i] not in residue_constants.restypes:
_a : Any = '''X''' # FIXME: strings are immutable
_a : Dict = np.array(
[residue_constants.restype_order.get(__a ,residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
_a : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__a ,g[1][axis].split() ) ) )
_a : Union[str, Any] = np.array(__a )
_a : Any = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__a ):
_a : Optional[int] = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
_a : Any = np.array(list(map({'''-''': 0, '''+''': 1}.get ,g[1][0].strip() ) ) )
_a : Tuple = np.zeros(
(
len(__a ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__a ):
_a : Union[str, Any] = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__a ,atom_mask=__a ,aatype=__a ,residue_index=np.arange(len(__a ) ) ,b_factors=__a ,)
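# --- Illustrative aside: re.split with a capturing group keeps the delimiters ---
# The parser above relies on this to pair each [TAG] header with its payload lines;
# the sample string below is a made-up two-tag fragment:
# >>> import re
# >>> parts = [t.strip() for t in re.split(r"(\[[A-Z]+\]\n)", "[PRIMARY]\nGAV\n[MASK]\n+++\n") if t]
# >>> parts
# ['[PRIMARY]', 'GAV', '[MASK]', '+++']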
def __UpperCAmelCase ( __a : Protein ,__a : int = 0 ) -> List[str]:
"""simple docstring"""
_a : List[str] = []
_a : str = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
_a : Optional[int] = prot.parents
_a : Tuple = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
_a : int = [p for i, p in zip(__a ,__a ) if i == chain_id]
if parents is None or len(__a ) == 0:
_a : List[Any] = ['''N/A''']
pdb_headers.append(F"""PARENT {' '.join(__a )}""" )
return pdb_headers
def __UpperCAmelCase ( __a : Protein ,__a : str ) -> str:
"""simple docstring"""
_a : List[str] = []
_a : Union[str, Any] = pdb_str.split('''\n''' )
_a : Any = prot.remark
if remark is not None:
out_pdb_lines.append(F"""REMARK {remark}""" )
_a : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
_a : Tuple = []
if prot.parents_chain_index is not None:
_a : Dict[str, List[str]] = {}
for p, i in zip(prot.parents ,prot.parents_chain_index ):
parent_dict.setdefault(str(__a ) ,[] )
parent_dict[str(__a )].append(__a )
_a : List[Any] = max([int(__a ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
_a : int = parent_dict.get(str(__a ) ,['''N/A'''] )
parents_per_chain.append(__a )
else:
parents_per_chain.append(list(prot.parents ) )
else:
_a : Tuple = [['''N/A''']]
def make_parent_line(__a : Sequence[str] ) -> str:
return F"""PARENT {' '.join(__a )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
_a : List[Any] = 0
for i, l in enumerate(__a ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__a )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__a ):
_a : str = parents_per_chain[chain_counter]
else:
_a : Optional[int] = ['''N/A''']
out_pdb_lines.append(make_parent_line(__a ) )
return "\n".join(__a )
def __UpperCAmelCase ( __a : Protein ) -> str:
"""simple docstring"""
_a : str = residue_constants.restypes + ['''X''']
def res_atoa(__a : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] ,'''UNK''' )
_a : Dict = residue_constants.atom_types
_a : List[str] = []
_a : List[Any] = prot.atom_mask
_a : Union[str, Any] = prot.aatype
_a : Optional[Any] = prot.atom_positions
_a : Dict = prot.residue_index.astype(np.intaa )
_a : Tuple = prot.b_factors
_a : str = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('''Invalid aatypes.''' )
_a : List[Any] = get_pdb_headers(__a )
if len(__a ) > 0:
pdb_lines.extend(__a )
_a : List[str] = aatype.shape[0]
_a : Optional[Any] = 1
_a : List[str] = 0
_a : Dict = string.ascii_uppercase
_a : Optional[Any] = None
# Add all atom sites.
for i in range(__a ):
_a : Any = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__a ,atom_positions[i] ,atom_mask[i] ,b_factors[i] ):
if mask < 0.5:
continue
_a : Optional[Any] = '''ATOM'''
_a : List[Any] = atom_name if len(__a ) == 4 else F""" {atom_name}"""
_a : Union[str, Any] = ''''''
_a : Dict = ''''''
_a : List[Any] = 1.00
_a : int = atom_name[0] # Protein supports only C, N, O, S, this works.
_a : int = ''''''
_a : Union[str, Any] = '''A'''
if chain_index is not None:
_a : int = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
_a : Tuple = (
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_a:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__a )
atom_index += 1
_a : List[Any] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
_a : Optional[int] = True
_a : Union[str, Any] = chain_index[i + 1]
if should_terminate:
# Close the chain.
_a : str = '''TER'''
_a : Any = (
F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__a )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__a ,__a ) )
pdb_lines.append('''END''' )
pdb_lines.append('''''' )
return "\n".join(__a )
def __UpperCAmelCase ( __a : Protein ) -> np.ndarray:
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def __UpperCAmelCase ( __a : FeatureDict ,__a : ModelOutput ,__a : Optional[np.ndarray] = None ,__a : Optional[np.ndarray] = None ,__a : Optional[str] = None ,__a : Optional[Sequence[str]] = None ,__a : Optional[Sequence[int]] = None ,) -> Protein:
"""simple docstring"""
return Protein(
aatype=features['''aatype'''] ,atom_positions=result['''final_atom_positions'''] ,atom_mask=result['''final_atom_mask'''] ,residue_index=features['''residue_index'''] + 1 ,b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) ,chain_index=__a ,remark=__a ,parents=__a ,parents_chain_index=__a ,)
| 15 |
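# --- Illustrative sketch: one fixed-column PDB ATOM record (hypothetical values) ---
# to_pdb above emits 80-column ATOM records where every field width matters.
# Formatting a single record by hand shows the layout; field widths follow the
# PDB specification, and exact padding in the function above may differ slightly:
atom_line = (
    f"{'ATOM':<6}{1:>5} {' CA':<4}{'':>1}"          # record name, serial, atom name, altloc
    f"{'ALA':>3} {'A':>1}"                           # residue name, chain id
    f"{1:>4}{'':>1}   "                              # residue number, insertion code
    f"{11.104:>8.3f}{6.134:>8.3f}{-6.504:>8.3f}"     # x, y, z coordinates
    f"{1.00:>6.2f}{0.00:>6.2f}          "            # occupancy, B-factor
    f"{'C':>2}{'':>2}"                               # element, charge
)
assert len(atom_line) == 80
print(atom_line)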
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
a__ = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
a__ = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
a__ = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
a__ = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
a__ = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
a__ = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
a__ = ''''''
a__ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
a__ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
a__ = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : List[str] ) -> Optional[int]:
"""simple docstring"""
assert ReadMe.from_string(__a ,__a ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def __UpperCAmelCase ( __a : List[str] ,__a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ):
_a : List[Any] = ReadMe.from_string(__a ,__a )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Dict ,__a : Dict ) -> Tuple:
"""simple docstring"""
with pytest.raises(__a ,match=re.escape(expected_error.format(path='''root''' ) ) ):
ReadMe.from_string(__a ,__a )
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
ReadMe.from_string(__a ,__a ,suppress_parsing_errors=__a )
@pytest.mark.parametrize(
'''readme_md, expected_dict''' ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Any ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : Tuple = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : Optional[Any] = ReadMe.from_readme(__a ,__a ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def __UpperCAmelCase ( __a : List[Any] ,__a : List[Any] ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : int = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : Optional[int] = expected_error.format(path=__a )
with pytest.raises(__a ,match=re.escape(__a ) ):
_a : Any = ReadMe.from_readme(__a ,__a )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : str ,__a : Union[str, Any] ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : Optional[Any] = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
_a : str = expected_error.format(path=__a )
with pytest.raises(__a ,match=re.escape(__a ) ):
ReadMe.from_readme(__a ,__a )
@pytest.mark.parametrize(
'''readme_md,''' ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def __UpperCAmelCase ( __a : Optional[Any] ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a : int = Path(__a ) / '''README.md'''
with open(__a ,'''w+''' ) as readme_file:
readme_file.write(__a )
ReadMe.from_readme(__a ,__a ,suppress_parsing_errors=__a )
| 15 | 1 |
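# --- Minimal sketch (illustrative; not the datasets implementation) ---
# ReadMe.from_string above parses markdown headings into the nested
# name/text/is_empty_text/subsections dicts shown in CORRECT_DICT. A toy parser
# producing the same shape (YAML front matter and validation omitted):
import re

def parse_sections(md: str) -> dict:
    root = {"name": "root", "text": "", "is_empty_text": True, "subsections": []}
    stack = [(0, root)]  # (heading level, node)
    for line in md.splitlines():
        m = re.match(r"^(#{1,6})\s+(.*)", line)
        if m:
            level = len(m.group(1))
            node = {"name": m.group(2).strip(), "text": "", "is_empty_text": True, "subsections": []}
            while stack[-1][0] >= level:   # pop back up to this heading's parent
                stack.pop()
            stack[-1][1]["subsections"].append(node)
            stack.append((level, node))
        elif line.strip():
            node = stack[-1][1]
            node["text"] = (node["text"] + "\n" + line).strip()
            node["is_empty_text"] = False
    return root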
from math import factorial
def __UpperCAmelCase ( __a : int ,__a : int ) -> int:
"""simple docstring"""
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(__a ) // (factorial(__a ) * factorial(n - k ))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'''If a class of 40 students must be arranged into groups of''',
f'''4 for group projects, there are {combinations(40, 4)} ways''',
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f'''are {combinations(10, 3)} ways that first, second and''',
'''third place can be awarded.''',
)
| 15 |
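# --- Sanity check (illustrative): math.comb reproduces the factorial formula ---
# Python 3.8+ ships math.comb, which matches the combinations function above and
# the three printed examples:
from math import comb, factorial

assert comb(52, 5) == factorial(52) // (factorial(5) * factorial(47)) == 2_598_960
assert comb(40, 4) == 91_390 and comb(10, 3) == 120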
from __future__ import annotations
def __UpperCAmelCase ( __a : list ) -> float:
"""simple docstring"""
if not nums:
raise ValueError('''List is empty''' )
return sum(__a ) / len(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 15 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
a__ = ['''small''', '''medium''', '''large''']
a__ = '''lm_head.decoder.weight'''
a__ = '''lm_head.weight'''
def __UpperCAmelCase ( __a : str ,__a : str ) -> List[str]:
"""simple docstring"""
_a : Any = torch.load(__a )
_a : List[str] = d.pop(__a )
os.makedirs(__a ,exist_ok=__a )
torch.save(__a ,os.path.join(__a ,__a ) )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
a__ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
a__ = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
a__ = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 15 | 1 |
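# --- Minimal sketch of the conversion step above (toy tensors, hypothetical shapes) ---
# DialoGPT checkpoints store the LM head under lm_head.decoder.weight, while
# transformers expects lm_head.weight, so the script pops the old key and
# re-inserts the tensor under the new one:
import torch

d = {"lm_head.decoder.weight": torch.zeros(4, 4), "transformer.wte.weight": torch.zeros(4, 4)}
d["lm_head.weight"] = d.pop("lm_head.decoder.weight")
assert "lm_head.decoder.weight" not in d and "lm_head.weight" in d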
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> Tuple:
_a : List[str] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''width_multiplier''' ) )
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=6_4 , _a=2 , _a=3 , _a="swish" , _a=3 , _a=3_2 , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=1_0 , _a=None , _a=0.25 , _a=0.0 , _a=0.0 , ) -> Optional[int]:
_a : int = parent
_a : str = batch_size
_a : Dict = image_size
_a : str = patch_size
_a : Optional[Any] = num_channels
_a : str = make_divisible(5_1_2 * width_multiplier , divisor=8 )
_a : Any = hidden_act
_a : Dict = conv_kernel_size
_a : List[Any] = output_stride
_a : Any = classifier_dropout_prob
_a : Optional[int] = use_labels
_a : List[str] = is_training
_a : List[Any] = num_labels
_a : Any = initializer_range
_a : Union[str, Any] = scope
_a : Dict = width_multiplier
_a : int = ffn_dropout
_a : int = attn_dropout
def __lowercase ( self ) -> int:
_a : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Union[str, Any] = None
_a : List[str] = None
if self.use_labels:
_a : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
_a : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_a : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __lowercase ( self ) -> List[Any]:
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __lowercase ( self , _a , _a , _a , _a ) -> Any:
_a : str = MobileViTVaModel(config=_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __lowercase ( self , _a , _a , _a , _a ) -> str:
_a : Union[str, Any] = self.num_labels
_a : int = MobileViTVaForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[Any] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self , _a , _a , _a , _a ) -> Union[str, Any]:
_a : Optional[Any] = self.num_labels
_a : List[str] = MobileViTVaForSemanticSegmentation(_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_a : Union[str, Any] = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __lowercase ( self ) -> int:
_a : str = self.prepare_config_and_inputs()
_a , _a , _a , _a : Dict = config_and_inputs
_a : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase__ : List[str] = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : int = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : str = False
def __lowercase ( self ) -> List[str]:
_a : Any = MobileViTVaModelTester(self )
_a : List[Any] = MobileViTVaConfigTester(self , config_class=_a , has_text_modality=_a )
def __lowercase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __lowercase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Any:
pass
def __lowercase ( self ) -> List[Any]:
_a , _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Dict = model_class(_a )
_a : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[int] = [*signature.parameters.keys()]
_a : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> Union[str, Any]:
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Any:
def check_hidden_states_output(_a , _a , _a ):
_a : List[Any] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Tuple = model(**self._prepare_for_class(_a , _a ) )
_a : Optional[int] = outputs.hidden_states
_a : List[str] = 5
self.assertEqual(len(_a ) , _a )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_a : List[str] = 2
for i in range(len(_a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_a , _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : List[str] = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> Dict:
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __lowercase ( self ) -> Any:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@slow
def __lowercase ( self ) -> Union[str, Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = MobileViTVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_a : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> int:
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __lowercase ( self ) -> Union[str, Any]:
_a : Tuple = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_a )
_a : str = self.default_image_processor
_a : List[Any] = prepare_img()
_a : str = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Any = model(**_a )
# verify the logits
_a : List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : Dict = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@slow
def __lowercase ( self ) -> Tuple:
_a : int = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_a : List[str] = model.to(_a )
_a : Any = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_a : Any = prepare_img()
_a : Any = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : List[Any] = model(**_a )
_a : Dict = outputs.logits
# verify the logits
_a : Tuple = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , _a )
_a : Any = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1e-4 ) )
@slow
def __lowercase ( self ) -> Tuple:
_a : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_a : Optional[Any] = model.to(_a )
_a : str = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_a : List[str] = prepare_img()
_a : Dict = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : int = model(**_a )
_a : List[Any] = outputs.logits.detach().cpu()
_a : List[Any] = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(5_0, 6_0)] )
_a : Tuple = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , _a )
_a : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=_a )
_a : Optional[int] = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , _a )
| 15 |
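# --- Hedged sketch: a typical make_divisible, as popularized by MobileNet ---
# The tester above derives last_hidden_size via make_divisible(512 * width_multiplier,
# divisor=8). The usual implementation rounds to the nearest multiple of the divisor
# while never shrinking the value by more than ~10%; the transformers version may
# differ in details, so treat this as an approximation:
def make_divisible_sketch(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # never drop more than ~10% of the original value
        new_value += divisor
    return new_value

assert make_divisible_sketch(512 * 1.0) == 512
assert make_divisible_sketch(512 * 0.75) == 384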
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase_ ( enum.Enum ):
"""simple docstring"""
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : Optional[Any] = 2
@add_end_docstrings(__lowercase )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self , *_a , **_a ) -> List[str]:
super().__init__(*_a , **_a )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_a : Dict = None
if self.model.config.prefix is not None:
_a : List[Any] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_a : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_a , _a , _a : str = self._sanitize_parameters(prefix=_a , **self._forward_params )
_a : Optional[Any] = {**self._preprocess_params, **preprocess_params}
_a : List[Any] = {**self._forward_params, **forward_params}
def __lowercase ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> Optional[int]:
_a : List[Any] = {}
if prefix is not None:
_a : Optional[Any] = prefix
if prefix:
_a : Dict = self.tokenizer(
_a , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Tuple = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
_a : Dict = handle_long_generation
preprocess_params.update(_a )
_a : Tuple = generate_kwargs
_a : Any = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
_a : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
_a : Any = ReturnType.TENSORS
if return_type is not None:
_a : Any = return_type
if clean_up_tokenization_spaces is not None:
_a : List[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
_a : Tuple = self.tokenizer.encode(_a , add_special_tokens=_a )
if len(_a ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
_a : List[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowercase ( self , *_a , **_a ) -> Union[str, Any]:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_a , **_a )
def __call__( self , _a , **_a ) -> List[str]:
return super().__call__(_a , **_a )
def __lowercase ( self , _a , _a="" , _a=None , **_a ) -> List[Any]:
_a : Optional[int] = self.tokenizer(
prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
_a : Union[str, Any] = prompt_text
if handle_long_generation == "hole":
_a : List[str] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
_a : int = generate_kwargs['''max_new_tokens''']
else:
_a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_a : List[str] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
_a : List[Any] = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
_a : List[str] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def __lowercase ( self , _a , **_a ) -> Optional[int]:
_a : Any = model_inputs['''input_ids''']
_a : Optional[Any] = model_inputs.get('''attention_mask''' , _a )
# Allow empty prompts
if input_ids.shape[1] == 0:
_a : int = None
_a : int = None
_a : List[str] = 1
else:
_a : List[Any] = input_ids.shape[0]
_a : Union[str, Any] = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_a : int = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
_a : Tuple = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
_a : int = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_a : Dict = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_a : Optional[Any] = self.model.generate(input_ids=_a , attention_mask=_a , **_a )
_a : int = generated_sequence.shape[0]
if self.framework == "pt":
_a : Tuple = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
_a : List[Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowercase ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> int:
_a : Tuple = model_outputs['''generated_sequence'''][0]
_a : int = model_outputs['''input_ids''']
_a : Any = model_outputs['''prompt_text''']
_a : Any = generated_sequence.numpy().tolist()
_a : Any = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_a : Optional[int] = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_a : str = self.tokenizer.decode(
_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , )
                # Remove the prompt from the decoded sequence (XLNet and Transfo-XL prepend a padding prompt)
if input_ids is None:
_a : Union[str, Any] = 0
else:
_a : str = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) )
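                # prompt_length is measured on the decoded string so the slice below
                # strips exactly the prompt text from the generated output.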
if return_type == ReturnType.FULL_TEXT:
_a : str = prompt_text + text[prompt_length:]
else:
_a : List[str] = text[prompt_length:]
_a : Union[str, Any] = {'''generated_text''': all_text}
records.append(_a )
return records
| 15 | 1 |
import math
def __UpperCAmelCase ( __a : list ,__a : int = 0 ,__a : int = 0 ) -> list:
"""simple docstring"""
_a : Any = end or len(__a )
for i in range(__a ,__a ):
_a : Optional[int] = i
_a : str = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_a : Tuple = array[temp_index - 1]
temp_index -= 1
_a : str = temp_index_value
return array
def __UpperCAmelCase ( __a : list ,__a : int ,__a : int ) -> None: # Max Heap
"""simple docstring"""
_a : Optional[Any] = index
_a : List[Any] = 2 * index + 1 # Left Node
_a : int = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_a : List[str] = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_a : Union[str, Any] = right_index
if largest != index:
_a , _a : Optional[Any] = array[largest], array[index]
heapify(__a ,__a ,__a )
def __UpperCAmelCase ( __a : list ) -> list:
"""simple docstring"""
_a : Any = len(__a )
for i in range(n // 2 ,-1 ,-1 ):
heapify(__a ,__a ,__a )
for i in range(n - 1 ,0 ,-1 ):
_a , _a : List[Any] = array[0], array[i]
heapify(__a ,0 ,__a )
return array
def __UpperCAmelCase ( __a : list ,__a : int ,__a : int ,__a : int ) -> int:
"""simple docstring"""
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def __UpperCAmelCase ( __a : list ,__a : int ,__a : int ,__a : int ) -> int:
"""simple docstring"""
_a : Optional[int] = low
_a : int = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_a , _a : str = array[j], array[i]
i += 1
def __UpperCAmelCase ( __a : list ) -> list:
"""simple docstring"""
if len(__a ) == 0:
return array
_a : Optional[Any] = 2 * math.ceil(math.loga(len(__a ) ) )
_a : Optional[Any] = 16
return intro_sort(__a ,0 ,len(__a ) ,__a ,__a )
def __UpperCAmelCase ( __a : list ,__a : int ,__a : int ,__a : int ,__a : int ) -> list:
"""simple docstring"""
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(__a )
max_depth -= 1
_a : Dict = median_of_a(__a ,__a ,start + ((end - start) // 2) + 1 ,end - 1 )
_a : Optional[Any] = partition(__a ,__a ,__a ,__a )
intro_sort(__a ,__a ,__a ,__a ,__a )
_a : int = p
return insertion_sort(__a ,__a ,__a )
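# A minimal usage sketch, assuming `sort` is the de-obfuscated name of the public
# entry point above (it is invoked as `sort` in the __main__ block below):
#   sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
#   -> [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]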
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ = input('''Enter numbers separated by a comma : ''').strip()
a__ = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 15 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __UpperCAmelCase ( __a : Dict=None ) -> str:
"""simple docstring"""
if subparsers is not None:
_a : Union[str, Any] = subparsers.add_parser('''test''' )
else:
_a : List[str] = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' ,default=__a ,help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) ,)
if subparsers is not None:
parser.set_defaults(func=__a )
return parser
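# Typical invocation of the subcommand registered above (the config path is illustrative):
#   accelerate test --config_file path/to/default_config.yaml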
def __UpperCAmelCase ( __a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Dict = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
_a : List[Any] = script_name
else:
_a : Union[str, Any] = F"""--config_file={args.config_file} {script_name}"""
_a : str = ['''accelerate-launch'''] + test_args.split()
_a : str = execute_subprocess_async(__a ,env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = test_command_parser()
_a : List[Any] = parser.parse_args()
test_command(__a )
if __name__ == "__main__":
main()
| 15 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowercase ( self ) -> List[Any]:
_a : str = 1
_a : int = 3
_a : int = (3_2, 3_2)
_a : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a )
return image
@property
def __lowercase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_a : Any = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def __lowercase ( self ) -> Dict:
torch.manual_seed(0 )
_a : Any = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def __lowercase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_a : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(_a )
@property
def __lowercase ( self ) -> Optional[int]:
def extract(*_a , **_a ):
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> Dict:
_a : Union[str, Any] = torch.ones([0] )
def __lowercase ( self , _a ) -> Any:
self.pixel_values.to(_a )
return self
return Out()
return extract
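    # The stub above replaces the real feature extractor in these tests: any call
    # returns an object with empty pixel_values and a device-move method, which is
    # all the safe-diffusion pipeline touches here.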
def __lowercase ( self ) -> Any:
_a : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a : int = self.dummy_cond_unet
_a : int = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_a , set_alpha_to_one=_a , )
_a : List[Any] = self.dummy_vae
_a : Dict = self.dummy_text_encoder
_a : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        # instantiate the pipeline with a DDIM scheduler (no PNDM PRK steps to skip here)
_a : Tuple = StableDiffusionPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
_a : List[Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
_a : Optional[int] = '''A painting of a squirrel eating a burger'''
_a : Union[str, Any] = torch.Generator(device=_a ).manual_seed(0 )
_a : Dict = sd_pipe([prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
_a : Union[str, Any] = output.images
_a : Optional[int] = torch.Generator(device=_a ).manual_seed(0 )
_a : int = sd_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=_a , )[0]
_a : Dict = image[0, -3:, -3:, -1]
_a : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_a : Union[str, Any] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase ( self ) -> Dict:
_a : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_a : int = self.dummy_cond_unet
_a : int = PNDMScheduler(skip_prk_steps=_a )
_a : Dict = self.dummy_vae
_a : int = self.dummy_text_encoder
_a : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
_a : Tuple = StableDiffusionPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
_a : List[Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
_a : Tuple = '''A painting of a squirrel eating a burger'''
_a : List[Any] = torch.Generator(device=_a ).manual_seed(0 )
_a : List[Any] = sd_pipe([prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
_a : int = output.images
_a : int = torch.Generator(device=_a ).manual_seed(0 )
_a : Optional[Any] = sd_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=_a , )[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_a : List[str] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase ( self ) -> List[str]:
_a : int = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=_a )
assert isinstance(_a , _a )
assert isinstance(pipe.scheduler , _a )
assert pipe.safety_checker is None
_a : Union[str, Any] = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_a )
_a : List[str] = StableDiffusionPipeline.from_pretrained(_a )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_a : List[str] = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __lowercase ( self ) -> List[str]:
_a : Optional[int] = self.dummy_cond_unet
_a : Dict = PNDMScheduler(skip_prk_steps=_a )
_a : List[Any] = self.dummy_vae
_a : str = self.dummy_text_encoder
_a : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
_a : Union[str, Any] = unet.half()
_a : Optional[int] = vae.half()
_a : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
_a : List[Any] = StableDiffusionPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
_a : Union[str, Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
_a : Optional[int] = '''A painting of a squirrel eating a burger'''
_a : str = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Any:
_a : int = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=_a )
_a : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_a : Union[str, Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
_a : Optional[Any] = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
_a : Any = 4_0_0_3_6_6_0_3_4_6
_a : Tuple = 7
# without safety guidance (sld_guidance_scale = 0)
_a : Any = torch.manual_seed(_a )
_a : Any = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
_a : Optional[int] = output.images
_a : Optional[int] = image[0, -3:, -3:, -1]
_a : List[Any] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance enabled (strong configuration)
_a : List[Any] = torch.manual_seed(_a )
_a : List[str] = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_a : List[str] = output.images
_a : Dict = image[0, -3:, -3:, -1]
_a : Dict = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase ( self ) -> Optional[int]:
_a : Any = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=_a )
_a : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_a : Any = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
_a : Tuple = '''padme amidala taking a bath artwork, safe for work, no nudity'''
_a : Union[str, Any] = 2_7_3_4_9_7_1_7_5_5
_a : int = 7
_a : Optional[Any] = torch.manual_seed(_a )
_a : str = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
_a : str = output.images
_a : Optional[int] = image[0, -3:, -3:, -1]
_a : List[str] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
_a : Union[str, Any] = torch.manual_seed(_a )
_a : Any = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_a : List[str] = output.images
_a : Optional[Any] = image[0, -3:, -3:, -1]
_a : Optional[int] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
_a : Any = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
_a : Tuple = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
_a : Optional[Any] = 1_0_4_4_3_5_5_2_3_4
_a : Any = 1_2
_a : int = torch.manual_seed(_a )
_a : List[Any] = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
_a : str = output.images
_a : str = image[0, -3:, -3:, -1]
_a : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
_a : Union[str, Any] = torch.manual_seed(_a )
_a : Tuple = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=5_0 , output_type='''np''' , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_a : Any = output.images
_a : Dict = image[0, -3:, -3:, -1]
_a : Any = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 15 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[Any] = tempfile.mkdtemp()
# fmt: off
_a : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_a : Any = {
'''do_resize''': True,
'''size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_a : str = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __lowercase ( self , **_a ) -> Any:
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , **_a ) -> str:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_a : Tuple = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowercase ( self ) -> str:
_a : List[str] = self.get_tokenizer()
_a : Tuple = self.get_image_processor()
_a : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Dict:
_a : List[str] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_a : List[Any] = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_a : Dict = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Any:
_a : Dict = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : int = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[str] = self.prepare_image_inputs()
_a : List[Any] = image_processor(_a , return_tensors='''np''' )
_a : Dict = processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = self.get_tokenizer()
_a : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Tuple = '''lower newer'''
_a : int = processor(text=_a )
_a : str = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowercase ( self ) -> List[Any]:
_a : Any = self.get_image_processor()
_a : str = self.get_tokenizer()
_a : Tuple = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : List[Any] = '''lower newer'''
_a : Union[str, Any] = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_a ):
processor()
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : Any = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a : int = processor.batch_decode(_a )
_a : int = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __lowercase ( self ) -> List[Any]:
_a : Tuple = self.get_image_processor()
_a : List[str] = self.get_tokenizer()
_a : str = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
_a : Optional[int] = '''lower newer'''
_a : Dict = self.prepare_image_inputs()
_a : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 15 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Any = "roberta"
def __init__( self , _a=5_0_2_6_5 , _a=7_6_8 , _a=1_2 , _a=1_2 , _a=3_0_7_2 , _a="gelu" , _a=0.1 , _a=0.1 , _a=5_1_2 , _a=2 , _a=0.02 , _a=1e-1_2 , _a=1 , _a=0 , _a=2 , _a="absolute" , _a=True , _a=None , **_a , ) -> Optional[Any]:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_a : Tuple = vocab_size
_a : int = hidden_size
_a : str = num_hidden_layers
_a : int = num_attention_heads
_a : Tuple = hidden_act
_a : str = intermediate_size
_a : List[Any] = hidden_dropout_prob
_a : Union[str, Any] = attention_probs_dropout_prob
_a : List[Any] = max_position_embeddings
_a : Union[str, Any] = type_vocab_size
_a : int = initializer_range
_a : Optional[int] = layer_norm_eps
_a : Dict = position_embedding_type
_a : int = use_cache
_a : List[str] = classifier_dropout
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
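        # ONNX export needs named dynamic axes; multiple-choice inputs carry an
        # extra `choice` axis between the batch and sequence dimensions.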
if self.task == "multiple-choice":
_a : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_a : List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 15 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
a__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : Optional[int] ,__a : List[str] ,__a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for attribute in key.split('''.''' ):
_a : Optional[Any] = getattr(__a ,__a )
if weight_type is not None:
_a : Dict = getattr(__a ,__a ).shape
else:
_a : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_a : List[Any] = value
elif weight_type == "weight_g":
_a : Any = value
elif weight_type == "weight_v":
_a : Union[str, Any] = value
elif weight_type == "bias":
_a : Optional[int] = value
else:
_a : List[Any] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __UpperCAmelCase ( __a : Any ,__a : Union[str, Any] ,__a : Union[str, Any] ) -> int:
"""simple docstring"""
_a : Union[str, Any] = []
_a : Union[str, Any] = fairseq_model.state_dict()
_a : Union[str, Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_a : int = False
if "conv_layers" in name:
load_conv_layer(
__a ,__a ,__a ,__a ,hf_model.config.feat_extract_norm == '''group''' ,)
_a : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
_a : Union[str, Any] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
_a : Any = True
if "*" in mapped_key:
_a : Optional[int] = name.split(__a )[0].split('''.''' )[-2]
_a : Any = mapped_key.replace('''*''' ,__a )
if "weight_g" in name:
_a : List[Any] = '''weight_g'''
elif "weight_v" in name:
_a : List[str] = '''weight_v'''
elif "weight" in name:
_a : Any = '''weight'''
elif "bias" in name:
_a : str = '''bias'''
else:
_a : Any = None
set_recursively(__a ,__a ,__a ,__a ,__a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __UpperCAmelCase ( __a : int ,__a : Optional[Any] ,__a : Dict ,__a : List[str] ,__a : Any ) -> Tuple:
"""simple docstring"""
_a : int = full_name.split('''conv_layers.''' )[-1]
_a : Any = name.split('''.''' )
_a : List[Any] = int(items[0] )
_a : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_a : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_a : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_a : int = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_a : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def __UpperCAmelCase ( __a : Dict ,__a : List[Any] ,__a : List[str]=None ,__a : Optional[int]=None ,__a : int=True ) -> List[Any]:
"""simple docstring"""
if config_path is not None:
_a : Tuple = HubertConfig.from_pretrained(__a )
else:
_a : Any = HubertConfig()
if is_finetuned:
if dict_path:
_a : Tuple = Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_a : Any = target_dict.pad_index
_a : Tuple = target_dict.bos_index
_a : Optional[int] = target_dict.eos_index
_a : Optional[Any] = len(target_dict.symbols )
_a : Tuple = os.path.join(__a ,'''vocab.json''' )
if not os.path.isdir(__a ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__a ) )
return
os.makedirs(__a ,exist_ok=__a )
with open(__a ,'''w''' ,encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices ,__a )
_a : Tuple = WavaVecaCTCTokenizer(
__a ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=__a ,)
_a : Tuple = True if config.feat_extract_norm == '''layer''' else False
_a : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=__a ,return_attention_mask=__a ,)
_a : List[Any] = WavaVecaProcessor(feature_extractor=__a ,tokenizer=__a )
processor.save_pretrained(__a )
_a : Tuple = HubertForCTC(__a )
else:
_a : Tuple = HubertModel(__a )
if is_finetuned:
_a , _a , _a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
_a , _a , _a : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_a : Any = model[0].eval()
recursively_load_weights(__a ,__a ,__a )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
a__ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
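# Example invocation (the script file name is a placeholder; the flags are the
# ones defined above):
#   python convert_hubert_checkpoint.py --checkpoint_path hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base --not_finetuned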
| 15 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a__ = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
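# Lazy import structure: the torch and TF model classes below are only imported
# on first attribute access; under TYPE_CHECKING the real imports run so static
# analyzers can resolve them.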
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 15 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = ["image_processor", "tokenizer"]
UpperCAmelCase__ : str = "ViltImageProcessor"
UpperCAmelCase__ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , _a=None , _a=None , **_a ) -> Any:
_a : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
_a : Dict = kwargs.pop('''feature_extractor''' )
_a : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
_a : int = self.image_processor
def __call__( self , _a , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding:
_a : Tuple = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel_values + pixel_mask
_a : str = self.image_processor(_a , return_tensors=_a )
encoding.update(_a )
return encoding
def __lowercase ( self , *_a , **_a ) -> Optional[Any]:
return self.tokenizer.batch_decode(*_a , **_a )
def __lowercase ( self , *_a , **_a ) -> str:
return self.tokenizer.decode(*_a , **_a )
@property
def __lowercase ( self ) -> Optional[int]:
_a : str = self.tokenizer.model_input_names
_a : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowercase ( self ) -> Optional[Any]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __lowercase ( self ) -> Any:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
| 15 | 1 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
a__ = 299792458
# Symbols
a__ , a__ , a__ , a__ = symbols('''ct x y z''')
def __UpperCAmelCase ( __a : float ) -> float:
"""simple docstring"""
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def __UpperCAmelCase ( __a : float ) -> float:
"""simple docstring"""
return 1 / sqrt(1 - beta(__a ) ** 2 )
def __UpperCAmelCase ( __a : float ) -> np.ndarray:
"""simple docstring"""
return np.array(
[
[gamma(__a ), -gamma(__a ) * beta(__a ), 0, 0],
[-gamma(__a ) * beta(__a ), gamma(__a ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def __UpperCAmelCase ( __a : float ,__a : np.ndarray | None = None ) -> np.ndarray:
"""simple docstring"""
if event is None:
_a : List[Any] = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(__a ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
a__ = transform(29979245)
print('''Example of four vector: ''')
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
a__ = {ct: c, x: 1, y: 1, z: 1}
a__ = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
| 15 |
from math import ceil
def __UpperCAmelCase ( __a : int = 1_001 ) -> int:
"""simple docstring"""
_a : Dict = 1
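    # Ring i of the spiral is a (2i + 1) x (2i + 1) square whose four corners sum
    # to 4 * (2i + 1)**2 - 12 * i, written below as 4 * odd**2 - 6 * even.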
for i in range(1 ,int(ceil(n / 2.0 ) ) ):
_a : int = 2 * i + 1
_a : str = 2 * i
_a : Any = total + 4 * odd**2 - 6 * even
return total
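# Sanity check from the problem statement: the diagonals of a 5 x 5 spiral sum to
# 101, and solution(5) indeed returns 101.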
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
a__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 15 | 1 |