import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
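
# Worked example (an illustrative sketch, not part of the original test file; the
# helper name is hypothetical): checking the expected-sequence-length arithmetic
# above with the tester defaults.
def _check_expected_seq_len_example():
    image_size, patch_size, num_detection_tokens = [30, 30], 2, 10
    num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)  # 15 * 15 = 225
    # 225 patch tokens + 1 [CLS] token + 10 detection tokens = 236
    assert num_patches + 1 + num_detection_tokens == 236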
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
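
# Minimal usage sketch (an illustration, not part of this module): the methods above
# compose into the standard predictor-corrector loop. `score_model` is a hypothetical
# score network and the tensor shape is a placeholder.
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=1000)
#   scheduler.set_sigmas(num_inference_steps=1000)
#   sample = torch.randn(1, 3, 256, 256) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       for _ in range(scheduler.config.correct_steps):
#           model_output = score_model(sample, t)
#           sample = scheduler.step_correct(model_output, sample).prev_sample
#       model_output = score_model(sample, t)
#       step = scheduler.step_pred(model_output, t, sample)
#       sample, sample_mean = step.prev_sample, step.prev_sample_mean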
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
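
# Illustrative note (not part of the original file): the `_LazyModule` registered
# above defers the heavy imports until first attribute access, so
#
#   from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseConfig
#
# resolves through `_import_structure` without importing the torch-dependent
# modeling file until one of its symbols is actually requested.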
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """
    Count the values of L <= limit for which exactly one integer-sided right
    triangle has perimeter L, generating primitive triples via Euclid's formula.
    """
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
'''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
'''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
'''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[F"upernet-swin-{size}" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
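
# Example invocation (a sketch; the script filename and paths are placeholders):
#
#   python convert_swin_upernet_to_pytorch.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny \
#       --push_to_hub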
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-multilingual-uncased": 512,
    "bert-base-multilingual-cased": 512,
    "bert-base-chinese": 512,
    "bert-base-german-cased": 512,
    "bert-large-uncased-whole-word-masking": 512,
    "bert-large-cased-whole-word-masking": 512,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
    "bert-large-cased-whole-word-masking-finetuned-squad": 512,
    "bert-base-cased-finetuned-mrpc": 512,
    "bert-base-german-dbmdz-cased": 512,
    "bert-base-german-dbmdz-uncased": 512,
    "TurkuNLP/bert-base-finnish-cased-v1": 512,
    "TurkuNLP/bert-base-finnish-uncased-v1": 512,
    "wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
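
# Minimal usage sketch (illustrative, not part of the original module):
#
#   from transformers import BertTokenizerFast
#
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   encoded = tokenizer("First segment", "Second segment")
#   # `token_type_ids` follow create_token_type_ids_from_sequences above:
#   # 0 for every token of `[CLS] A [SEP]`, 1 for every token of `B [SEP]`.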
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForMultipleChoice,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
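
# Background sketch for the rotary test above (an illustration; notation only): for
# position m and inverse frequency theta_i, rotary position embedding rotates each
# (even, odd) feature pair of the query/key by the angle m * theta_i:
#
#   q'_{2i}   = q_{2i} * cos(m * theta_i) - q_{2i+1} * sin(m * theta_i)
#   q'_{2i+1} = q_{2i+1} * cos(m * theta_i) + q_{2i} * sin(m * theta_i)
#
# which is why the expected tensors above mix sin and cos terms of the sinusoidal
# table rather than plain additive offsets.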
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
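# A hedged sketch of the lazy-import idea used above: attribute access on the
# module object triggers the real submodule import. The actual
# transformers._LazyModule also handles __dir__, pickling and the module spec.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule}
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }
    def __getattr__(self, item):
        submodule = importlib.import_module('.' + self._class_to_module[item], self.__name__)
        return getattr(submodule, item)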
| 671 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds ( train_file , eval_file , test_file , tokenizer , label_column_id , max_seq_length = None , ):
    '''simple docstring'''
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('''csv''' , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    label2id = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding='''max_length''' ) , batched=True , )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding='''max_length''' , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, label2id
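# Self-contained sketch of the tf.data.Dataset.from_generator pattern used in
# get_tfds above, with toy data (names here are illustrative, not from the script):
def _toy_generator():
    for ids, label in (([1, 2, 3], 0), ([4, 5, 6], 1)):
        yield {'''input_ids''': ids}, label

_toy_ds = tf.data.Dataset.from_generator(
    _toy_generator,
    ({'''input_ids''': tf.int32}, tf.int64),
    ({'''input_ids''': tf.TensorShape([None] )}, tf.TensorShape([] )),
)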
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
    """simple docstring"""
    label_column_id : int = field(metadata={"""help""": """Which column contains the label"""} )
    train_file : Optional[str] = field(default=None ,metadata={"""help""": """The path of the training file"""} )
    dev_file : Optional[str] = field(default=None ,metadata={"""help""": """The path of the development file"""} )
    test_file : Optional[str] = field(default=None ,metadata={"""help""": """The path of the test file"""} )
    max_seq_length : int = field(
        default=128 ,metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } ,)
    overwrite_cache : bool = field(
        default=False ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class ModelArguments :
    """simple docstring"""
    model_name_or_path : str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name : Optional[str] = field(
        default=None ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name : Optional[str] = field(
        default=None ,metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    use_fast : bool = field(default=False ,metadata={"""help""": """Set this flag to use fast tokenization."""} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir : Optional[str] = field(
        default=None ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,)
def main ( ):
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset , eval_dataset , test_ds , label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id ) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        with open(output_eval_file , '''w''' ) as writer:
            logger.info('''***** Eval results *****''' )
            for key, value in result.items():
                logger.info(F""" {key} = {value}""" )
                writer.write(F"""{key} = {value}\n""" )
        results.update(result )
    return results
if __name__ == "__main__":
main()
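# Example invocation for the script above (flag names follow the dataclasses
# and TFTrainingArguments; the script filename and data paths are placeholders):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv \
#       --label_column_id 0 --max_seq_length 128 \
#       --output_dir ./out --do_train --do_eval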
| 702 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_00_00
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def map ( dataset , **kwargs ):
    '''simple docstring'''
    _ = dataset.map(**kwargs )
@get_duration
def filter ( dataset , **kwargs ):
    '''simple docstring'''
    _ = dataset.filter(**kwargs )
def benchmark_map_filter( ):
    '''simple docstring'''
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , '''dataset.arrow''' ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples['''text'''] )
        times['''map identity'''] = map(dataset )
        times['''map identity batched'''] = map(dataset , batched=True )
        times['''map no-op batched'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''numpy''' ):
            times['''map no-op batched numpy'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''pandas''' ):
            times['''map no-op batched pandas'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
            times['''map no-op batched pytorch'''] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
            times['''map no-op batched tensorflow'''] = map(dataset , function=lambda x : None , batched=True )
        times['''map fast-tokenizer batched'''] = map(dataset , function=tokenize , batched=True )
        times['''filter'''] = filter(dataset )
        # Activate later when the tokenizer supports batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH , '''wb''' ) as f:
        f.write(json.dumps(times ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
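# `get_duration` comes from a local utils module that is not shown here. A
# plausible minimal implementation (an assumption -- the benchmark only needs
# the wall-clock seconds of the wrapped call):
import functools
import time

def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # seconds, as stored in the `times` dict above
    return wrapper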
| 671 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class _UpperCAmelCase ( PreTrainedTokenizer ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = ["""input_ids""", """attention_mask"""]
lowercase__ = []
lowercase__ = []
    def __init__( self, vocab_file : str, bos_token : str = "<s>", eos_token : str = "</s>", sep_token : str = "</s>", cls_token : str = "<s>", unk_token : str = "<unk>", pad_token : str = "<pad>", mask_token : str = "<mask>", tokenizer_file : Optional[str] = None, src_lang : Optional[str] = None, tgt_lang : Optional[str] = None, sp_model_kwargs : Optional[Dict[str, Any]] = None, additional_special_tokens : Optional[List[str]] = None, legacy_behaviour : bool = False, **kwargs : str, ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=legacy_behaviour, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
    # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        self._src_lang = src_lang if src_lang is not None else '''eng_Latn'''
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
lowercase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : str, lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
    @property
    def src_lang( self ):
        '''simple docstring'''
        return self._src_lang
    @src_lang.setter
    def src_lang( self, new_src_lang : str ):
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None, lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase, token_ids_a=lowerCamelCase, already_has_special_tokens=lowerCamelCase )
lowercase__ = [1] * len(self.prefix_tokens )
lowercase__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(lowerCamelCase )) + ([0] * len(lowerCamelCase )) + suffix_ones
def lowercase__ ( self : str, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase__ ( self : int, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : str, lowerCamelCase : Optional[int], lowerCamelCase : str, lowerCamelCase : Optional[str], lowerCamelCase : Optional[str], **lowerCamelCase : int ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase__ = src_lang
lowercase__ = self(lowerCamelCase, add_special_tokens=lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase )
lowercase__ = self.convert_tokens_to_ids(lowerCamelCase )
lowercase__ = tgt_lang_id
return inputs
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self : Optional[int], lowerCamelCase : str ):
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : Dict ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ = self.sp_model.PieceToId(lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase__ ( self : int, lowerCamelCase : List[Any] ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase__ ( self : List[Any], lowerCamelCase : List[Any] ):
'''simple docstring'''
        lowercase__ = ''''''.join(lowerCamelCase ).replace(SPIECE_UNDERLINE, ''' ''' ).strip()
return out_string
def lowercase__ ( self : Tuple, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase, '''wb''' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
def lowercase__ ( self : int, lowerCamelCase : List[str], lowerCamelCase : str = "eng_Latn", lowerCamelCase : Optional[List[str]] = None, lowerCamelCase : str = "fra_Latn", **lowerCamelCase : Tuple, ):
'''simple docstring'''
lowercase__ = src_lang
lowercase__ = tgt_lang
return super().prepare_seqaseq_batch(lowerCamelCase, lowerCamelCase, **lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase__ ( self : Any ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self, src_lang : str ):
        '''simple docstring'''
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self, lang : str ):
        '''simple docstring'''
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
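    # Resulting sequence layouts produced by the two helpers above (tokens are
    # rendered symbolically; `lang` is the configured language code):
    #
    #   legacy_behaviour=True :   tok_1 ... tok_n </s> [lang]
    #   legacy_behaviour=False:   [lang] tok_1 ... tok_n </s>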
| 703 |
class RadixNode :
    """simple docstring"""
    def __init__( self, prefix : str = "", is_leaf : bool = False ):
        '''simple docstring'''
        # Mapping from the first character of a child's prefix to that child node
        self.nodes = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match( self, word : str ):
        '''simple docstring'''
        x = 0
        for q, w in zip(self.prefix, word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self, words : list ):
        '''simple docstring'''
        for word in words:
            self.insert(word )
    def insert( self, word : str ):
        '''simple docstring'''
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edge whose prefix starts with the word's first character
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word shares only part of the node's prefix
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = RadixNode(matching_string, False )
                self.nodes[matching_string[0]] = aux_node
                aux_node.nodes[remaining_prefix[0]] = incoming_node
                if remaining_word == "":
                    aux_node.is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
    def find( self, word : str ):
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0], None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be in the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )
    def delete( self, word : str ):
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0], None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be in the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the node if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree( self, height : int = 0 ):
        '''simple docstring'''
        if self.prefix != "":
            print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''' )
        for value in self.nodes.values():
            value.print_tree(height + 1 )
def test_trie( ):
    '''simple docstring'''
    words = '''banana bananas bandana band apple all beast'''.split()
    root = RadixNode()
    root.insert_many(words )
    assert all(root.find(word ) for word in words )
    assert not root.find('''bandanas''' )
    assert not root.find('''apps''' )
    root.delete('''all''' )
    assert not root.find('''all''' )
    root.delete('''banana''' )
    assert not root.find('''banana''' )
    assert root.find('''bananas''' )
    return True
def pytests( ):
    '''simple docstring'''
    assert test_trie()
def main( ):
    '''simple docstring'''
    root = RadixNode()
    words = '''banana bananas bandanas bandana band apple all beast'''.split()
    root.insert_many(words )
    print('''Words:''' , words )
    print('''Tree:''' )
    root.print_tree()
if __name__ == "__main__":
main()
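# Worked example of the three-way split match() returns, which drives the
# insert/find/delete cases above:
#
#   node = RadixNode(prefix='''band''' )
#   node.match('''banana''' )   # -> ('ban', 'd', 'ana')   word splits the node (case 4)
#   node.match('''bandana''' )  # -> ('band', '', 'ana')   recurse into a child (case 3)
#   node.match('''band''' )     # -> ('band', '', '')      exact prefix match (case 1)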
| 671 | 0 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester :
"""simple docstring"""
    def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
    def create_and_check_model( self, config, pixel_values, labels ):
        '''simple docstring'''
        model = RegNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification( self, config, pixel_values, labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = RegNetModelTester(self )
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False )
def lowercase__ ( self : Dict ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : int ):
'''simple docstring'''
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(config=lowerCamelCase )
for name, module in model.named_modules():
if isinstance(lowerCamelCase, (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ), msg=F"""Parameter {name} of model {model_class} seems not properly initialized""", )
self.assertTrue(
torch.all(module.bias == 0 ), msg=F"""Parameter {name} of model {model_class} seems not properly initialized""", )
def lowercase__ ( self : Any ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : Dict ):
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase ), expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ = layer_type
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def lowercase__ ( self : List[str] ):
'''simple docstring'''
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def a ( ):
'''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
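# Minimal inference sketch mirroring the integration test above (checkpoint is
# REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]; the image comes from the COCO
# fixture already used by prepare_img):
#
#   image_processor = AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
#   model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
#   inputs = image_processor(images=prepare_img(), return_tensors='''pt''' )
#   with torch.no_grad():
#       logits = model(**inputs ).logits      # shape (1, 1000)
#   print(model.config.id2label[int(logits.argmax(-1 ) )] )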
| 704 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = ViTImageProcessor if is_vision_available() else None
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp( self ):
        '''simple docstring'''
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        image_processor_map = {
            '''do_normalize''': False,
            '''do_resize''': True,
            '''image_processor_type''': '''ViTImageProcessor''',
            '''resample''': 3,
            '''size''': {'''height''': 32, '''width''': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map, fp )
    def get_tokenizer( self, **kwargs ):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs )
    def get_image_processor( self, **kwargs ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8 )
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1 ) )
        return image_input
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
lowercase__ = self.get_image_processor(do_normalize=lowerCamelCase, padding_value=1.0 )
lowercase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=lowerCamelCase, padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(lowerCamelCase, return_tensors='''np''' )
lowercase__ = processor(images=lowerCamelCase, return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = processor(text=lowerCamelCase )
lowercase__ = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.char_decode(lowerCamelCase )
lowercase__ = tokenizer.batch_decode(lowerCamelCase )
lowercase__ = [seq.replace(''' ''', '''''' ) for seq in decoded_tok]
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = None
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = torch.randn(1, 27, 38 )
lowercase__ = torch.randn(1, 27, 50_257 )
lowercase__ = torch.randn(1, 27, 30_522 )
lowercase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ), ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
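        # The three random tensors above stand in for MGP-STR's three
        # recognition heads: character-level (38 symbols, the vocab built in
        # setUp), BPE (50_257, GPT-2-sized) and WordPiece (30_522, BERT-sized).
        # batch_decode reports per-head predictions plus a fused
        # `generated_text`/`scores` pair, as the assertion above checks.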
| 671 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
        model = UNetaDModel(
sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('''AttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''AttnUpBlock2D'''), )
return model
@property
def lowercase__ ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
        model = UNetaDConditionModel(
sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), cross_attention_dim=10, )
return model
@property
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
        vqvae = AutoencoderKL(
sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D'''), up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D'''), )
        unet = UNetaDModel(
sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('''AttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''AttnUpBlock2D'''), )
return vqvae, unet
@slow
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = Mel(
x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )
lowercase__ = DDPMScheduler()
lowercase__ = AudioDiffusionPipeline(vqvae=lowerCamelCase, unet=self.dummy_unet, mel=lowerCamelCase, scheduler=lowerCamelCase )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(42 )
lowercase__ = pipe(generator=lowerCamelCase, steps=4 )
lowercase__ = output.audios[0]
lowercase__ = output.images[0]
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(42 )
lowercase__ = pipe(generator=lowerCamelCase, steps=4, return_dict=lowerCamelCase )
lowercase__ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowercase__ = np.frombuffer(image.tobytes(), dtype='''uint8''' )[:10]
lowercase__ = np.frombuffer(image_from_tuple.tobytes(), dtype='''uint8''' )[:10]
lowercase__ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowercase__ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
lowercase__ = DDIMScheduler()
lowercase__ = self.dummy_vqvae_and_unet
lowercase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=lowerCamelCase, scheduler=lowerCamelCase )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
np.random.seed(0 )
lowercase__ = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(42 )
lowercase__ = pipe(raw_audio=lowerCamelCase, generator=lowerCamelCase, start_step=5, steps=10 )
lowercase__ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowercase__ = np.frombuffer(image.tobytes(), dtype='''uint8''' )[:10]
lowercase__ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowercase__ = self.dummy_unet_condition
lowercase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0], unet=lowerCamelCase, mel=lowerCamelCase, scheduler=lowerCamelCase )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
np.random.seed(0 )
lowercase__ = torch.rand((1, 1, 10) )
lowercase__ = pipe(generator=lowerCamelCase, encoding=lowerCamelCase )
lowercase__ = output.images[0]
lowercase__ = np.frombuffer(image.tobytes(), dtype='''uint8''' )[:10]
lowercase__ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = torch_device
lowercase__ = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(42 )
lowercase__ = pipe(generator=lowerCamelCase )
lowercase__ = output.audios[0]
lowercase__ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowercase__ = np.frombuffer(image.tobytes(), dtype='''uint8''' )[:10]
lowercase__ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
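        # Round trip the assertions above rely on: Mel renders audio to a
        # spectrogram image of sample_size[0] x sample_size[1] pixels, and each
        # generated image decodes back to (sample_size[1] - 1) * hop_length
        # audio samples -- exactly the audio length checked here.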
| 705 |
def _modexpt ( base , exponent , modulo_value ):
    '''simple docstring'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
def solution ( base = 1777 , height = 1855 , digits = 8 ):
    '''simple docstring'''
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result
if __name__ == "__main__":
print(F"{solution() = }")
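    # Worked check of the square-and-multiply helper above (added for
    # illustration): _modexpt(3, 4, 100) = (_modexpt(3, 2, 100) ** 2) % 100 = 81,
    # so _modexpt(3, 5, 100) = (3 * 81) % 100 = 43 = pow(3, 5, 100).
    assert _modexpt(3, 5, 100) == pow(3, 5, 100) == 43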
| 671 | 0 |
import random
def partition ( a , left_index , right_index ):
    '''simple docstring'''
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[j] , a[i] = a[i], a[j]
            i += 1
    a[left_index] , a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random ( a , left , right ):
    '''simple docstring'''
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[pivot] , a[left] = (
            a[left],
            a[pivot],
        ) # switches the pivot with the left-most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index ) # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right ) # recursive quicksort to the right of the pivot point
def main ( ):
    '''simple docstring'''
    user_input = input('''Enter numbers separated by a comma:\n''' ).strip()
    arr = [int(item ) for item in user_input.split(''',''' )]
    quick_sort_random(arr , 0 , len(arr ) )
    print(arr )
if __name__ == "__main__":
main()
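    # Deterministic sanity check of the restored sort (illustrative; holds for
    # any sequence of random pivot choices):
    sample = [5, 1, 4, 2, 3]
    quick_sort_random(sample , 0 , len(sample ) )
    assert sample == [1, 2, 3, 4, 5]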
| 706 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self, speech_model : WhisperForConditionalGeneration, speech_processor : WhisperProcessor, vae : AutoencoderKL, text_encoder : CLIPTextModel, tokenizer : CLIPTokenizer, unet : UNetaDConditionModel, scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker : StableDiffusionSafetyChecker, feature_extractor : CLIPImageProcessor, ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, )
    def enable_attention_slicing( self, slice_size : Optional[Union[str, int]] = "auto" ):
        '''simple docstring'''
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        '''simple docstring'''
        self.enable_attention_slicing(None )
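    # Attention slicing trades speed for memory: with slice_size="auto" the
    # UNet attends in chunks of attention_head_dim // 2, and passing None (as
    # disable_attention_slicing does) restores full-batch attention.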
@torch.no_grad()
    def __call__( self, audio, sampling_rate : int = 16_000, height : int = 512, width : int = 512, num_inference_steps : int = 50, guidance_scale : float = 7.5, negative_prompt : Optional[Union[str, List[str]]] = None, num_images_per_prompt : Optional[int] = 1, eta : float = 0.0, generator : Optional[torch.Generator] = None, latents : Optional[torch.FloatTensor] = None, output_type : Optional[str] = "pil", return_dict : bool = True, callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps : int = 1, **kwargs, ):
        '''simple docstring'''
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors='''pt''', sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True )[
            0
        ]
        if isinstance(prompt, str ):
            batch_size = 1
        elif isinstance(prompt, list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase, lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(lowerCamelCase )}.""" )
# get prompt text embeddings
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt, lowerCamelCase, -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [''''''] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !="""
F""" {type(lowerCamelCase )}.""" )
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=lowerCamelCase, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt, lowerCamelCase, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='''cpu''', dtype=lowerCamelCase ).to(
self.device )
else:
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase )
# predict the noise residual
lowercase__ = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = 1 / 0.18215 * latents
lowercase__ = self.vae.decode(lowerCamelCase ).sample
lowercase__ = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase )
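# Hedged usage sketch (added for illustration; the audio loader, file names and the
# assumption that a pipeline instance `pipe` has been built from pretrained Whisper,
# CLIP and Stable Diffusion components are all hypothetical, not from this file):
#
#   import librosa
#   audio, sampling_rate = librosa.load("speech.wav", sr=16_000)
#   output = pipe(audio, sampling_rate=sampling_rate, num_inference_steps=50)
#   output.images[0].save("speech_to_image.png")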
| 671 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int, lowerCamelCase : Any, lowerCamelCase : Optional[int]=13, lowerCamelCase : int=3, lowerCamelCase : Optional[Any]=224, lowerCamelCase : List[str]=30, lowerCamelCase : List[Any]=400, lowerCamelCase : List[str]=True, lowerCamelCase : Tuple=None, lowerCamelCase : int=True, lowerCamelCase : Union[str, Any]=[0.5, 0.5, 0.5], lowerCamelCase : Any=[0.5, 0.5, 0.5], ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = ViTImageProcessor if is_vision_available() else None
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = EfficientFormerImageProcessorTester(self )
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''image_mean''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''image_std''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
pass
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_proc_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
# Test batched
lowercase__ = image_processor(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_proc_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
# Test batched
lowercase__ = image_processor(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_proc_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processor(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), )
# Test batched
lowercase__ = image_processor(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
), ) | 707 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : str, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = [[] for _ in range(lowerCamelCase )]
lowercase__ = size
def __getitem__( self : Optional[Any], lowerCamelCase : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return self._size
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(lowerCamelCase, lowerCamelCase ) )
def lowercase__ ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = deque([start_vertex] )
lowercase__ = [None] * self.size
lowercase__ = 0
while queue:
lowercase__ = queue.popleft()
lowercase__ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowercase__ = current_distance + edge.weight
lowercase__ = distances[edge.destination_vertex]
if (
isinstance(lowerCamelCase, lowerCamelCase )
and new_distance >= dest_vertex_distance
):
continue
lowercase__ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
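# Hedged standalone sketch (added, not part of the original module): the same 0-1 BFS
# idea on a plain adjacency dict, independent of the classes above. Weight-0 edges go
# to the front of the deque and weight-1 edges to the back, which keeps the deque
# ordered by tentative distance.
def zero_one_bfs(graph: dict[int, list[tuple[int, int]]], start: int, finish: int) -> int:
    distances = {start: 0}
    queue = deque([start])
    while queue:
        vertex = queue.popleft()
        for neighbour, weight in graph.get(vertex, []):
            new_distance = distances[vertex] + weight
            if neighbour in distances and distances[neighbour] <= new_distance:
                continue
            distances[neighbour] = new_distance
            if weight == 0:
                queue.appendleft(neighbour)
            else:
                queue.append(neighbour)
    if finish not in distances:
        raise ValueError("No path from start to finish.")
    return distances[finish]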
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 0 |
def a ( lowerCamelCase_ ):
'''simple docstring'''
try:
lowercase__ = float(lowerCamelCase_ )
except ValueError:
raise ValueError('''Please enter a valid number''' )
lowercase__ = decimal - int(lowerCamelCase_ )
if fractional_part == 0:
return int(lowerCamelCase_ ), 1
else:
lowercase__ = len(str(lowerCamelCase_ ).split('''.''' )[1] )
lowercase__ = int(decimal * (10**number_of_frac_digits) )
lowercase__ = 10**number_of_frac_digits
lowercase__ , lowercase__ = denominator, numerator
while True:
lowercase__ = dividend % divisor
if remainder == 0:
break
lowercase__ , lowercase__ = divisor, remainder
lowercase__ , lowercase__ = numerator / divisor, denominator / divisor
return int(lowerCamelCase_ ), int(lowerCamelCase_ )
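# Hedged cross-check (added, not part of the original module): for well-formed inputs
# the standard library reaches the same reduced fraction via `fractions.Fraction`.
def decimal_to_fraction_stdlib(decimal):
    from fractions import Fraction

    fraction = Fraction(str(decimal))
    return fraction.numerator, fraction.denominator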
if __name__ == "__main__":
print(F"{decimal_to_fraction(2) = }")
print(F"{decimal_to_fraction(89.0) = }")
print(F"{decimal_to_fraction('67') = }")
print(F"{decimal_to_fraction('45.0') = }")
print(F"{decimal_to_fraction(1.5) = }")
print(F"{decimal_to_fraction('6.25') = }")
print(F"{decimal_to_fraction('78td') = }") | 708 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
        # the input arrives as a comma-separated string; split it into a list of number strings
lowercase__ = arr.split(''',''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = [int(self.array[0] )] * len(self.array )
lowercase__ = [int(self.array[0] )] * len(self.array )
for i in range(1, len(self.array ) ):
lowercase__ = max(
int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
lowercase__ = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
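# Hedged reference sketch (added, not in the original): the class above is a dynamic
# programming formulation of the maximum subarray sum; Kadane's algorithm computes
# the same answer in one pass.
def kadane_max_subarray(nums):
    best = current = nums[0]
    for value in nums[1:]:
        current = max(value, current + value)  # extend the current run or restart here
        best = max(best, current)
    return best

assert kadane_max_subarray([1, -2, 4, -1, 2]) == 5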
if __name__ == "__main__":
A__ : Dict = input('please input some numbers:')
A__ : Union[str, Any] = SubArray(whole_array)
A__ : int = array.solve_sub_array()
    print(('the result is:', re))
| 671 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : Dict = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] = ['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
A__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
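# Hedged sketch (added for illustration; `_MiniLazyModule` is not part of this file):
# the `_LazyModule` used above defers the heavy submodule imports until an attribute
# is first accessed. A stripped-down version of the same idea:
import importlib
import types

class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, name_to_module):
        super().__init__(name)
        # maps an exported name to the module that actually defines it, e.g.
        # {"LongformerModel": "transformers.models.longformer.modeling_longformer"}
        self._name_to_module = name_to_module

    def __getattr__(self, item):
        if item not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")
        module = importlib.import_module(self._name_to_module[item])
        return getattr(module, item)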
| 709 |
from itertools import count
def a ( lowerCamelCase_ = 50 ):
'''simple docstring'''
lowercase__ = [1] * min_block_length
for n in count(lowerCamelCase_ ):
fill_count_functions.append(1 )
for block_length in range(lowerCamelCase_ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 100_0000:
break
return n
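# Added note (hedged): this is Project Euler problem 115. fill_count_functions[n]
# counts the ways to fill a row of length n with red blocks of length at least
# min_block_length, any two blocks separated by at least one black square; the
# answer is the least n whose count first exceeds one million.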
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A__ : Optional[int] = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def a ( lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = True
while ask_again:
lowercase__ = input(lowerCamelCase_ )
try:
if default is not None and len(lowerCamelCase_ ) == 0:
return default
return convert_value(lowerCamelCase_ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowerCamelCase_ )
def a ( lowerCamelCase_ , lowerCamelCase_=[] , lowerCamelCase_=None , lowerCamelCase_=0 ):
'''simple docstring'''
lowercase__ = BulletMenu(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = menu.run(default_choice=lowerCamelCase_ )
return convert_value(lowerCamelCase_ ) if convert_value is not None else result
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(lowerCamelCase_ )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(lowerCamelCase_ )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(lowerCamelCase_ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(lowerCamelCase_ )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(lowerCamelCase_ )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def a ( lowerCamelCase_ ):
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class _UpperCAmelCase ( argparse.RawDescriptionHelpFormatter ):
"""simple docstring"""
def lowercase__ ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Any, lowerCamelCase : Dict, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = super()._format_usage(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = usage.replace('''<command> [<args>] ''', '''''' )
return usage
| 710 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A__ : Tuple = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[int], lowerCamelCase : int=64, lowerCamelCase : Union[str, Any]=48_000, lowerCamelCase : str=480, lowerCamelCase : Tuple=10, lowerCamelCase : List[Any]=1_024, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[Any]=False, lowerCamelCase : float = 0, lowerCamelCase : float = 14_000, lowerCamelCase : int = None, lowerCamelCase : str = "fusion", lowerCamelCase : str = "repeatpad", **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase, sampling_rate=lowerCamelCase, padding_value=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
lowercase__ = top_db
lowercase__ = truncation
lowercase__ = padding
lowercase__ = fft_window_size
lowercase__ = (fft_window_size >> 1) + 1
lowercase__ = hop_length
lowercase__ = max_length_s
lowercase__ = max_length_s * sampling_rate
lowercase__ = sampling_rate
lowercase__ = frequency_min
lowercase__ = frequency_max
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm=lowerCamelCase, mel_scale='''htk''', )
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm='''slaney''', mel_scale='''slaney''', )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase__ ( self : Optional[int], lowerCamelCase : np.array, lowerCamelCase : Optional[np.array] = None ):
'''simple docstring'''
lowercase__ = spectrogram(
lowerCamelCase, window_function(self.fft_window_size, '''hann''' ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=lowerCamelCase, log_mel='''dB''', )
return log_mel_spectrogram.T
def lowercase__ ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
if len(ranges[1] ) == 0:
            # if the audio is too short, fall back to index 0 for the middle chunk
lowercase__ = [0]
if len(ranges[2] ) == 0:
            # if the audio is too short, fall back to index 0 for the back chunk
lowercase__ = [0]
# randomly choose index for each part
lowercase__ = np.random.choice(ranges[0] )
lowercase__ = np.random.choice(ranges[1] )
lowercase__ = np.random.choice(ranges[2] )
lowercase__ = mel[idx_front : idx_front + chunk_frames, :]
lowercase__ = mel[idx_middle : idx_middle + chunk_frames, :]
lowercase__ = mel[idx_back : idx_back + chunk_frames, :]
lowercase__ = torch.tensor(mel[None, None, :] )
lowercase__ = torch.nn.functional.interpolate(
lowerCamelCase, size=[chunk_frames, 64], mode='''bilinear''', align_corners=lowerCamelCase )
lowercase__ = mel_shrink[0][0].numpy()
lowercase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
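        # Added note: the fusion above stacks four mel views along a new channel axis:
        # a bilinearly shrunk global view plus three randomly placed chunks drawn from
        # the front, middle and back thirds of the full spectrogram.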
def lowercase__ ( self : List[str], lowerCamelCase : np.array, lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase__ = len(lowerCamelCase ) - max_length
lowercase__ = np.random.randint(0, overflow + 1 )
lowercase__ = waveform[idx : idx + max_length]
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
                lowercase__ = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
lowercase__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase__ = np.stack([mel, mel, mel, mel], axis=0 )
lowercase__ = False
else:
lowercase__ = self._random_mel_fusion(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
lowercase__ = False
        # "repeat" is an extra padding mode: the audio is tiled before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, lowerCamelCase ) )
lowercase__ = np.pad(lowerCamelCase, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0 )
if truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
else:
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any], lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], lowerCamelCase : str = None, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, **lowerCamelCase : List[str], ):
'''simple docstring'''
lowercase__ = truncation if truncation is not None else self.truncation
lowercase__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase__ = isinstance(lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase, np.ndarray ):
lowercase__ = np.asarray(lowerCamelCase, dtype=np.floataa )
elif isinstance(lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray(lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
lowercase__ = [
self._get_input_mel(lowerCamelCase, max_length if max_length else self.nb_max_samples, lowerCamelCase, lowerCamelCase )
for waveform in raw_speech
]
lowercase__ = []
lowercase__ = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase )
is_longer.append(lowerCamelCase )
if truncation == "fusion" and sum(lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowercase__ = np.random.randint(0, len(lowerCamelCase ) )
lowercase__ = True
if isinstance(input_mel[0], lowerCamelCase ):
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowercase__ = [[longer] for longer in is_longer]
lowercase__ = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowercase__ = BatchFeature(lowerCamelCase )
if return_tensors is not None:
lowercase__ = input_features.convert_to_tensors(lowerCamelCase )
return input_features
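# Hedged standalone sketch (added, not part of the class above): the "repeatpad"
# strategy tiles a short waveform up to max_length and zero-pads whatever remains.
def _repeatpad_demo(waveform, max_length):
    n_repeat = int(max_length / len(waveform))
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)

assert _repeatpad_demo(np.ones(3), 8).tolist() == [1, 1, 1, 1, 1, 1, 0, 0]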
| 671 | 0 |
import argparse
from collections import defaultdict
import yaml
A__ : Union[str, Any] = 'docs/source/en/_toctree.yml'
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = defaultdict(lowerCamelCase_ )
lowercase__ = []
lowercase__ = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
else:
new_doc_list.append(lowerCamelCase_ )
lowercase__ = new_doc_list
lowercase__ = [key for key, value in counts.items() if value > 1]
lowercase__ = []
for duplicate_key in duplicates:
lowercase__ = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
if len(lowerCamelCase_ ) > 1:
raise ValueError(
F"""{duplicate_key} is present several times in the documentation table of content at """
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
lowercase__ = sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(lowerCamelCase_ ) > 1:
        raise ValueError('''The doc list has more than one \'overview\' doc, which is not allowed.''' )
overview_doc.extend(lowerCamelCase_ )
# Sort
return overview_doc
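# Added illustration (hedged): given
#   [{"local": "overview", "title": "Overview"},
#    {"local": "ddim", "title": "DDIM"},
#    {"local": "ddim", "title": "DDIM"}]
# clean_doc_toc deduplicates the repeated "ddim" entry, sorts the remaining entries
# by title, and keeps the "overview" page at the front.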
def a ( lowerCamelCase_=False ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = yaml.safe_load(f.read() )
# Get to the API doc
lowercase__ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase__ = content[api_idx]['''sections''']
# Then to the model doc
lowercase__ = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
lowercase__ = api_doc[scheduler_idx]['''sections''']
lowercase__ = clean_doc_toc(lowerCamelCase_ )
lowercase__ = False
if new_scheduler_doc != scheduler_doc:
lowercase__ = True
if overwrite:
lowercase__ = new_scheduler_doc
if diff:
if overwrite:
lowercase__ = api_doc
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(lowerCamelCase_ , allow_unicode=lowerCamelCase_ ) )
else:
raise ValueError(
                '''The scheduler doc part of the table of contents is not properly sorted, run `make style` to fix this.''' )
def a ( lowerCamelCase_=False ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = yaml.safe_load(f.read() )
# Get to the API doc
lowercase__ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase__ = content[api_idx]['''sections''']
# Then to the model doc
lowercase__ = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
lowercase__ = False
lowercase__ = api_doc[pipeline_idx]['''sections''']
lowercase__ = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
lowercase__ = pipeline_doc['''section''']
lowercase__ = clean_doc_toc(lowerCamelCase_ )
if overwrite:
lowercase__ = new_sub_pipeline_doc
new_pipeline_docs.append(lowerCamelCase_ )
# sort overall pipeline doc
lowercase__ = clean_doc_toc(lowerCamelCase_ )
if new_pipeline_docs != pipeline_docs:
lowercase__ = True
if overwrite:
lowercase__ = new_pipeline_docs
if diff:
if overwrite:
lowercase__ = api_doc
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(lowerCamelCase_ , allow_unicode=lowerCamelCase_ ) )
else:
raise ValueError(
                '''The pipeline doc part of the table of contents is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
A__ : List[str] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
A__ : Optional[int] = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 711 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = None
lowercase__ = None
def a ( ):
'''simple docstring'''
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ ):
'''simple docstring'''
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(lowerCamelCase_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 0
return output
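# Hedged alternative (added, not in the original): a single-pass zigzag that runs one
# breadth-first sweep and reverses every other level, O(n) overall instead of one
# full descent per level as in zigzag() above. It only assumes nodes expose `.data`,
# `.left` and `.right`.
def zigzag_single_pass(root):
    if root is None:
        return []
    output = []
    level = [root]
    reverse = False
    while level:
        values = [node.data for node in level]
        output.append(values[::-1] if reverse else values)
        reverse = not reverse
        level = [child for node in level for child in (node.left, node.right) if child]
    return output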
def a ( ): # Main function for testing.
'''simple docstring'''
lowercase__ = make_tree()
print(F"""In-order Traversal: {inorder(lowerCamelCase_ )}""" )
print(F"""Pre-order Traversal: {preorder(lowerCamelCase_ )}""" )
print(F"""Post-order Traversal: {postorder(lowerCamelCase_ )}""" , '''\n''' )
print(F"""Height of Tree: {height(lowerCamelCase_ )}""" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(lowerCamelCase_ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(lowerCamelCase_ ) + 1 ):
print(F"""Level {level}:""" , get_nodes_from_left_to_right(lowerCamelCase_ , level=lowerCamelCase_ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 671 | 0 |
from collections import Counter
from timeit import timeit
def a ( lowerCamelCase_ = "" , ):
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
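# Added note: a string can be rearranged into a palindrome iff at most one character
# occurs an odd number of times; `c % 2` maps each count to 0 or 1, so the sum counts
# the odd characters and `< 2` enforces "zero or one of them".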
def a ( lowerCamelCase_ = "" ):
'''simple docstring'''
if len(lowerCamelCase_ ) == 0:
return True
lowercase__ = input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
lowercase__ = {}
for character in lower_case_input_str:
lowercase__ = character_freq_dict.get(lowerCamelCase_ , 0 ) + 1
lowercase__ = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def a ( lowerCamelCase_ = "" ):
'''simple docstring'''
print('''\nFor string = ''' , lowerCamelCase_ , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(lowerCamelCase_ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(lowerCamelCase_ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
A__ : Optional[int] = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
A__ : List[str] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 712 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = DistilBertTokenizer
lowercase__ = DistilBertTokenizerFast
lowercase__ = True
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
lowercase__ = tokenizer.encode('''sequence builders''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''multi-sequence build''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 671 | 0 |
def a ( lowerCamelCase_ ):
'''simple docstring'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
from __future__ import annotations
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
if resistor <= 0:
lowercase__ = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(lowerCamelCase_ )
first_sum += 1 / float(lowerCamelCase_ )
index += 1
return 1 / first_sum
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase__ = F"""Resistor at index {index} has a negative value!"""
raise ValueError(lowerCamelCase_ )
index += 1
return sum_r
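# Added worked example: two 4.0 ohm resistors combine to 1 / (1/4 + 1/4) = 2.0 ohms
# in parallel and to 4.0 + 4.0 = 8.0 ohms in series.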
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : Optional[Any] = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
A__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 714 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('''RGB''' )
lowercase__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
lowercase__ = transform(lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
return image
def a ( lowerCamelCase_ ):
'''simple docstring'''
if "visual_encoder" in key:
lowercase__ = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , lowerCamelCase_ )
if "blocks" in key:
lowercase__ = re.sub(r'''blocks''' , '''layers''' , lowerCamelCase_ )
if "attn" in key:
lowercase__ = re.sub(r'''attn''' , '''self_attn''' , lowerCamelCase_ )
if "norm1" in key:
lowercase__ = re.sub(r'''norm1''' , '''layer_norm1''' , lowerCamelCase_ )
if "norm2" in key:
lowercase__ = re.sub(r'''norm2''' , '''layer_norm2''' , lowerCamelCase_ )
if "encoder.norm" in key:
lowercase__ = re.sub(r'''encoder.norm''' , '''post_layernorm''' , lowerCamelCase_ )
if "encoder.patch_embed.proj" in key:
lowercase__ = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , lowerCamelCase_ )
if "encoder.pos_embed" in key:
lowercase__ = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , lowerCamelCase_ )
if "encoder.cls_token" in key:
lowercase__ = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , lowerCamelCase_ )
if "self_attn" in key:
lowercase__ = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , lowerCamelCase_ )
return key
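# Added example (hedged): rename_key maps original BLIP parameter names onto the
# Hugging Face layout, e.g.
#   "visual_encoder.blocks.0.attn.qkv.weight"
#   -> "vision_model.encoder.layers.0.self_attn.qkv.weight"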
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
if config_path is not None:
lowercase__ = BlipConfig.from_pretrained(lowerCamelCase_ )
else:
lowercase__ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowercase__ = BlipForConditionalGeneration(lowerCamelCase_ ).eval()
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase__ = blip_decoder(pretrained=lowerCamelCase_ , image_size=384 , vit='''base''' )
lowercase__ = pt_model.eval()
lowercase__ = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
hf_model.load_state_dict(lowerCamelCase_ )
lowercase__ = 384
lowercase__ = load_demo_image(image_size=lowerCamelCase_ , device='''cpu''' )
lowercase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase__ = tokenizer(['''a picture of'''] ).input_ids
lowercase__ = hf_model.generate(lowerCamelCase_ , lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase__ = hf_model.generate(lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCamelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase__ = blip_vqa(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
vqa_model.eval()
lowercase__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForQuestionAnswering(lowerCamelCase_ )
hf_vqa_model.load_state_dict(lowerCamelCase_ )
lowercase__ = ['''How many dogs are in this image?''']
lowercase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' ).input_ids
lowercase__ = hf_vqa_model.generate(lowerCamelCase_ , lowerCamelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase__ = blip_itm(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
itm_model.eval()
lowercase__ = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForImageTextRetrieval(lowerCamelCase_ )
lowercase__ = ['''A picture of a woman with a dog sitting in a beach''']
lowercase__ = tokenizer(
lowerCamelCase_ , return_tensors='''pt''' , padding='''max_length''' , truncation=lowerCamelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCamelCase_ )
hf_itm_model.eval()
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
A__ : List[Any] = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 671 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = 1
@register_to_config
def __init__( self : Union[str, Any], lowerCamelCase : int = 2_000, lowerCamelCase : float = 0.15, lowerCamelCase : float = 0.01, lowerCamelCase : float = 1348.0, lowerCamelCase : float = 1E-5, lowerCamelCase : int = 1, ):
'''simple docstring'''
lowercase__ = sigma_max
# setable values
lowercase__ = None
self.set_sigmas(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
return sample
def lowercase__ ( self : Dict, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : Union[str, torch.device] = None ):
'''simple docstring'''
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowercase__ = torch.linspace(1, lowerCamelCase, lowerCamelCase, device=lowerCamelCase )
def lowercase__ ( self : str, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : float = None, lowerCamelCase : float = None ):
'''simple docstring'''
lowercase__ = sigma_min if sigma_min is not None else self.config.sigma_min
lowercase__ = sigma_max if sigma_max is not None else self.config.sigma_max
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCamelCase, lowerCamelCase )
lowercase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowercase__ = torch.exp(torch.linspace(math.log(lowerCamelCase ), math.log(lowerCamelCase ), lowerCamelCase ) )
lowercase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
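        # Added note: both sigma grids above are geometric in sigma: `discrete_sigmas`
        # is the exponential of a uniform grid in log-sigma, and the per-timestep
        # sigmas follow sigma_min * (sigma_max / sigma_min) ** t.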
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : str ):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def lowercase__ ( self : Tuple, lowerCamelCase : torch.FloatTensor, lowerCamelCase : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowercase__ = timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowercase__ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowercase__ = timesteps.to(self.discrete_sigmas.device )
lowercase__ = self.discrete_sigmas[timesteps].to(sample.device )
lowercase__ = self.get_adjacent_sigma(lowerCamelCase, lowerCamelCase ).to(sample.device )
lowercase__ = torch.zeros_like(lowerCamelCase )
lowercase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowercase__ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowercase__ = diffusion.unsqueeze(-1 )
lowercase__ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowercase__ = randn_tensor(
sample.shape, layout=sample.layout, generator=lowerCamelCase, device=sample.device, dtype=sample.dtype )
lowercase__ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowercase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCamelCase, prev_sample_mean=lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
lowercase__ = randn_tensor(sample.shape, layout=sample.layout, generator=lowerCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowercase__ = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowercase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowercase__ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowercase__ = step_size.unsqueeze(-1 )
lowercase__ = sample + step_size * model_output
lowercase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
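        # Added note (hedged): this corrector is the annealed Langevin MCMC step of
        # the predictor-corrector sampler in Song et al. (2021), with the step size
        # derived from the target signal-to-noise ratio `self.config.snr`.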
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, ):
'''simple docstring'''
lowercase__ = timesteps.to(original_samples.device )
lowercase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowercase__ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCamelCase ) * sigmas[:, None, None, None]
)
lowercase__ = noise + original_samples
return noisy_samples
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : int=30, lowerCamelCase : Tuple=400, lowerCamelCase : Any=True, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''shortest_edge''': 20}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def lowercase__ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''crop_size''' ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Any ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
        lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
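# A minimal usage sketch mirroring the shape checks above (assumes `pil_image`
# is a PIL.Image.Image; size/crop_size values match the tester defaults):
#
#   image_processor = MobileNetVaImageProcessor(
#       do_resize=True, size={"shortest_edge": 20},
#       do_center_crop=True, crop_size={"height": 18, "width": 18})
#   pixel_values = image_processor(images=pil_image, return_tensors="pt").pixel_values
#   # pixel_values.shape == (1, 3, 18, 18)  -> (batch, channels, crop height, crop width)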
| 671 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowercase__ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : List[str], lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = TextaTextGenerationPipeline(model=lowerCamelCase, tokenizer=lowerCamelCase )
return generator, ["Something to write", "Something else"]
def lowercase__ ( self : int, lowerCamelCase : Tuple, lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowercase__ = generator('''Something there''' )
self.assertEqual(lowerCamelCase, [{'''generated_text''': ANY(lowerCamelCase )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
lowercase__ = generator(['''This is great !''', '''Something else'''], num_return_sequences=2, do_sample=lowerCamelCase )
self.assertEqual(
lowerCamelCase, [
[{'''generated_text''': ANY(lowerCamelCase )}, {'''generated_text''': ANY(lowerCamelCase )}],
[{'''generated_text''': ANY(lowerCamelCase )}, {'''generated_text''': ANY(lowerCamelCase )}],
], )
lowercase__ = generator(
['''This is great !''', '''Something else'''], num_return_sequences=2, batch_size=2, do_sample=lowerCamelCase )
self.assertEqual(
lowerCamelCase, [
[{'''generated_text''': ANY(lowerCamelCase )}, {'''generated_text''': ANY(lowerCamelCase )}],
[{'''generated_text''': ANY(lowerCamelCase )}, {'''generated_text''': ANY(lowerCamelCase )}],
], )
with self.assertRaises(lowerCamelCase ):
generator(4 )
@require_torch
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = pipeline('''text2text-generation''', model='''patrickvonplaten/t5-tiny-random''', framework='''pt''' )
# do_sample=False necessary for reproducibility
        lowercase__ = generator('''Something there''', do_sample=False )
self.assertEqual(lowerCamelCase, [{'''generated_text''': ''''''}] )
        num_return_sequences = 3
        lowercase__ = generator(
            '''Something there''', num_return_sequences=num_return_sequences, num_beams=num_return_sequences, )
lowercase__ = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(lowerCamelCase, lowerCamelCase )
        lowercase__ = generator('''This is a test''', do_sample=False, num_return_sequences=2, return_tensors=True )
self.assertEqual(
lowerCamelCase, [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
], )
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = '''<pad>'''
        lowercase__ = generator(
            ['''This is a test''', '''This is a second test'''], do_sample=False, num_return_sequences=2, batch_size=2, return_tensors=True, )
self.assertEqual(
lowerCamelCase, [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
], )
@require_tf
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = pipeline('''text2text-generation''', model='''patrickvonplaten/t5-tiny-random''', framework='''tf''' )
# do_sample=False necessary for reproducibility
        lowercase__ = generator('''Something there''', do_sample=False )
self.assertEqual(lowerCamelCase, [{'''generated_text''': ''''''}] )
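# A minimal usage sketch mirroring the tests above (the tiny random checkpoint
# produces meaningless text; do_sample=False keeps the output deterministic):
#
#   generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#   outputs = generator("Something there", do_sample=False)
#   # outputs has the form [{"generated_text": "..."}]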
| 716 |
import argparse
import os
import re
PATH_TO_TRANSFORMERS = 'src/transformers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent ( lowerCamelCase_ ):
    '''simple docstring'''
    lowercase__ = _re_indent.search(lowerCamelCase_ )
    return "" if lowercase__ is None else lowercase__.groups()[0]
def a ( lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase_ ):
index += 1
lowercase__ = ['''\n'''.join(lines[:index] )]
else:
lowercase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ = [lines[index]]
index += 1
while index < len(lowerCamelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCamelCase_ ) )
if index < len(lowerCamelCase_ ) - 1:
lowercase__ = [lines[index + 1]]
index += 1
else:
lowercase__ = []
else:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
lowercase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def ignore_underscore ( key ):
    '''simple docstring'''
    def _inner(lowerCamelCase_ ):
        return key(lowerCamelCase_ ).lower().replace('''_''' , '''''' )
    return _inner
def sort_objects ( objects , key=None ):
    '''simple docstring'''
    # If no key is provided, we use a noop.
    def noop(lowerCamelCase_ ):
        return lowerCamelCase_
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key_fn = ignore_underscore(key )
    return sorted(constants , key=key_fn ) + sorted(classes , key=key_fn ) + sorted(functions , key=key_fn )
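# A minimal illustration of the ordering rule (hypothetical object names):
#   sort_objects(["load_tf_weights", "BertModel", "BERT_PRETRAINED_MODEL_ARCHIVE_LIST"])
#   -> ["BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "BertModel", "load_tf_weights"]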
def sort_objects_in_import ( import_statement ):
    '''simple docstring'''
    # This inner function sorts imports between [ ].
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return F"""[{imports}]"""
        keys = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split('''\n''' )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '''[''' else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace , import_statement )
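# A minimal illustration on a single-line entry (hypothetical input; the
# bracket content is sorted with the rules above: constants, classes, functions):
#   sort_objects_in_import('    "models.bert": ["BertModel", "BERT_CONSTANT", "load_bert"],')
#   -> '    "models.bert": ["BERT_CONSTANT", "BertModel", "load_bert"],'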
def sort_imports ( file , check_only=True ):
    '''simple docstring'''
    with open(file , encoding='''utf-8''' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('''\n''' )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '''\n'''.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                block_sorted = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(block_sorted )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(F"""Overwriting {file}.""" )
            with open(file , '''w''' , encoding='''utf-8''' ) as f:
                f.write('''\n'''.join(main_blocks ) )
def sort_imports_in_all_inits ( check_only=True ):
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , '''__init__.py''' ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , '''__init__.py''' )]
    if len(failures ) > 0:
        raise ValueError(F"""Would overwrite {len(failures )} files, run `make style`.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 671 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, '''models/bert/''' ) )
        shutil.copy(
            os.path.join(git_repo_path, '''src/transformers/models/bert/modeling_bert.py''' ), os.path.join(self.transformer_dir, '''models/bert/modeling_bert.py''' ), )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any]=None ):
'''simple docstring'''
lowercase__ = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowercase__ = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowercase__ = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=119 )
lowercase__ = black.format_str(lowerCamelCase, mode=lowerCamelCase )
lowercase__ = os.path.join(self.transformer_dir, '''new_code.py''' )
with open(lowerCamelCase, '''w''', newline='''\n''' ) as f:
f.write(lowerCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name, overwrite=lowerCamelCase )
with open(lowerCamelCase, '''r''' ) as f:
self.assertTrue(f.read(), lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
self.assertEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''', '''BertLMPredictionHead''', REFERENCE_CODE + '''\n''', )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''', '''BertLMPredictionHead''', lowerCamelCase, )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''', '''TestModelLMPredictionHead''', re.sub('''Bert''', '''TestModel''', lowerCamelCase ), )
# Copy consistency with a really long name
lowercase__ = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""", F"""{long_class_name}LMPredictionHead""", re.sub('''Bert''', lowerCamelCase, lowerCamelCase ), )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''', '''TestModelLMPredictionHead''', lowerCamelCase, overwrite_result=re.sub('''Bert''', '''TestModel''', lowerCamelCase ), )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
lowercase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
lowercase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowercase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
lowercase__ , lowercase__ = check_copies.convert_to_localized_md(
lowerCamelCase, lowerCamelCase, localized_readme['''format_model_list'''] )
self.assertFalse(lowerCamelCase )
self.assertEqual(lowerCamelCase, lowerCamelCase )
lowercase__ , lowercase__ = check_copies.convert_to_localized_md(
lowerCamelCase, lowerCamelCase, localized_readme['''format_model_list'''] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(lowerCamelCase )
lowercase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
lowercase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowercase__ = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
lowercase__ , lowercase__ = check_copies.convert_to_localized_md(
lowerCamelCase, lowerCamelCase, localized_readme['''format_model_list'''] )
# Check if the model link is synchronized.
self.assertEqual(lowerCamelCase, lowerCamelCase )
| 717 |
from math import sqrt
def is_prime ( lowerCamelCase_ ):
    '''simple docstring'''
    assert isinstance(lowerCamelCase_ , int ) and (
        lowerCamelCase_ >= 0
    ), "'number' must been an int and positive"
    lowercase__ = True
    # 0 and 1 are none primes.
    if lowerCamelCase_ <= 1:
        lowercase__ = False
    for divisor in range(2 , int(round(sqrt(lowerCamelCase_ ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if lowerCamelCase_ % divisor == 0:
            lowercase__ = False
            break
    # precondition
    assert isinstance(lowercase__ , bool ), "'status' must been from type bool"
    return lowercase__
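# e.g. is_prime(29) -> True, is_prime(1) -> False (0 and 1 are not prime)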
def a ( lowerCamelCase_ ):
    '''simple docstring'''
    assert isinstance(lowerCamelCase_ , int ) and (lowerCamelCase_ > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2 , lowerCamelCase_ + 1 ) )
    ans = []  # this list will be returns.
    # actual sieve of Eratosthenes
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def get_prime_numbers ( lowerCamelCase_ ):
    '''simple docstring'''
    assert isinstance(lowerCamelCase_ , int ) and (lowerCamelCase_ > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , lowerCamelCase_ + 1 ):
        if is_prime(number ):
            ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def prime_factorization ( lowerCamelCase_ ):
    '''simple docstring'''
    assert isinstance(lowerCamelCase_ , int ) and lowerCamelCase_ >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = lowerCamelCase_
    if lowerCamelCase_ == 0 or lowerCamelCase_ == 1:
        ans.append(lowerCamelCase_ )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(lowerCamelCase_ ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(lowerCamelCase_ )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def a ( lowerCamelCase_ ):
    '''simple docstring'''
    assert isinstance(lowerCamelCase_ , int ) and (
        lowerCamelCase_ >= 0
    ), "'number' must been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(lowerCamelCase_ )
    ans = max(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def a ( lowerCamelCase_ ):
    '''simple docstring'''
    assert isinstance(lowerCamelCase_ , int ) and (
        lowerCamelCase_ >= 0
    ), "'number' must been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(lowerCamelCase_ )
    ans = min(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must been from type int"
    return ans
def is_even ( lowerCamelCase_ ):
    '''simple docstring'''
    assert isinstance(lowerCamelCase_ , int ), "'number' must been an int"
    assert isinstance(lowerCamelCase_ % 2 == 0 , bool ), "compare must been from type bool"
    return lowerCamelCase_ % 2 == 0
def a ( lowerCamelCase_ ):
    '''simple docstring'''
    assert isinstance(lowerCamelCase_ , int ), "'number' must been an int"
    assert isinstance(lowerCamelCase_ % 2 != 0 , bool ), "compare must been from type bool"
    return lowerCamelCase_ % 2 != 0
def a ( lowerCamelCase_ ):
    '''simple docstring'''
    assert (
        isinstance(lowerCamelCase_ , int ) and (lowerCamelCase_ > 2) and is_even(lowerCamelCase_ )
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(lowerCamelCase_ )
    len_pn = len(prime_numbers )
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == lowerCamelCase_:
                loop = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans , list )
        and (len(ans ) == 2)
        and (ans[0] + ans[1] == lowerCamelCase_)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd ( numbera , numberb ):
    '''simple docstring'''
    assert (
        isinstance(numbera , int )
        and isinstance(numberb , int )
        and (numbera >= 0)
        and (numberb >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while numberb != 0:
        rest = numbera % numberb
        numbera = numberb
        numberb = rest
    # precondition
    assert isinstance(numbera , int ) and (
        numbera >= 0
    ), "'number' must been from type int and positive"
    return numbera
def a ( numbera , numberb ):
    '''simple docstring'''
    assert (
        isinstance(numbera , int )
        and isinstance(numberb , int )
        and (numbera >= 1)
        and (numberb >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if numbera > 1 and numberb > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(numbera )
        prime_fac_b = prime_factorization(numberb )
    elif numbera == 1 or numberb == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(numbera , numberb )
    count_a = 0
    count_b = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n )
                count_b = prime_fac_b.count(n )
                for _ in range(max(count_a , count_b ) ):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n )
                for _ in range(count_a ):
                    ans *= n
            done.append(n )
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n )
            for _ in range(count_b ):
                ans *= n
            done.append(n )
    # precondition
    assert isinstance(ans , int ) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def a ( lowerCamelCase_ ):
    '''simple docstring'''
    assert isinstance(lowerCamelCase_ , int ) and (lowerCamelCase_ >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < lowerCamelCase_:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans ):
            ans += 1
    # precondition
    assert isinstance(ans , int ) and is_prime(
        ans ), "'ans' must been a prime number and from type int"
    return ans
def a ( p_number_a , p_number_b ):
    '''simple docstring'''
    assert (
        is_prime(p_number_a ) and is_prime(p_number_b ) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number ):
        number += 1
    while number < p_number_b:
        ans.append(number )
        number += 1
        # fetch the next prime number.
        while not is_prime(number ):
            number += 1
    # precondition
    assert (
        isinstance(ans , list )
        and ans[0] != p_number_a
        and ans[len(ans ) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors ( lowerCamelCase_ ):
    '''simple docstring'''
    assert isinstance(lowerCamelCase_ , int ) and (lowerCamelCase_ >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1 , lowerCamelCase_ + 1 ):
        if lowerCamelCase_ % divisor == 0:
            ans.append(divisor )
    # precondition
    assert ans[0] == 1 and ans[len(ans ) - 1] == lowerCamelCase_, "Error in function getDivisors(...)"
    return ans
def a ( lowerCamelCase_ ):
    '''simple docstring'''
    assert isinstance(lowerCamelCase_ , int ) and (
        lowerCamelCase_ > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(lowerCamelCase_ )
    # precondition
    assert (
        isinstance(divisors , list )
        and (divisors[0] == 1)
        and (divisors[len(divisors ) - 1] == lowerCamelCase_)
    ), "Error in help-function getDivisors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1] ) == lowerCamelCase_
def a ( numerator , denominator ):
    '''simple docstring'''
    assert (
        isinstance(numerator , int )
        and isinstance(denominator , int )
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator ) , abs(denominator ) )
    # precondition
    assert (
        isinstance(gcd_of_fraction , int )
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a ( lowerCamelCase_ ):
    '''simple docstring'''
    assert isinstance(lowerCamelCase_ , int ) and (lowerCamelCase_ >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1 , lowerCamelCase_ + 1 ):
        ans *= factor
    return ans
def a ( lowerCamelCase_ ):
    '''simple docstring'''
    assert isinstance(lowerCamelCase_ , int ) and (lowerCamelCase_ >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fiba = 1
    ans = 1  # this will be return
    for _ in range(lowerCamelCase_ - 1 ):
        tmp = ans
        ans += fiba
        fiba = tmp
    return ans
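# A few usage examples under the helper names restored above (values follow
# directly from the definitions in this file):
#   prime_factorization(360) -> [2, 2, 2, 3, 3, 5]
#   get_divisors(12)         -> [1, 2, 3, 4, 6, 12]
#   gcd(24, 36)              -> 12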
| 671 | 0 |
'''simple docstring'''
def a ( lowerCamelCase_ ):
    '''simple docstring'''
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(lowerCamelCase_ , node , visited , rec_stk )
        for node in lowerCamelCase_ )
def depth_first_search ( graph , vertex , visited , rec_stk ):
    '''simple docstring'''
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
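# Example: the back edge 2 -> 0 closes a cycle, so the check returns True;
# without it there is no cycle:
#   a({0: [1], 1: [2], 2: [0]}) -> True
#   a({0: [1], 1: [2], 2: []})  -> False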
if __name__ == "__main__":
from doctest import testmod
testmod()
| 718 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results ( result , args ):
    '''simple docstring'''
    log_outputs = args.log_outputs
    dataset_id = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
    # load metric
    wer = load_metric('''wer''' )
    cer = load_metric('''cer''' )
    # compute metrics
    wer_result = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    cer_result = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    # print & log results
    result_str = F"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str )
    with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = F"""log_{dataset_id}_predictions.txt"""
        target_file = F"""log_{dataset_id}_targets.txt"""
        with open(pred_file , '''w''' ) as p, open(target_file , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(F"""{i}""" + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(F"""{i}""" + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )
            result.map(write_to_file , with_indices=True )
def normalize_text ( lowerCamelCase_ ):
    '''simple docstring'''
    chars_to_ignore_regex = '''[,?.!\-\;\:"“%‘”�—’…–]'''  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '''''' , lowerCamelCase_.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['''\n\n''', '''\n''', '''   ''', '''  ''']
    for t in token_sequences_to_ignore:
        text = ''' '''.join(text.split(t ) )
    return text
def main ( args ):
    '''simple docstring'''
    # load dataset
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('''audio''' , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch['''prediction'''] = prediction['''text''']
        batch['''target'''] = normalize_text(batch['''sentence'''] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
    args = parser.parse_args()
main(args)
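# Example invocation (hypothetical model and dataset identifiers):
#   python eval.py --model_id hf-test/xls-r-dummy \
#       --dataset mozilla-foundation/common_voice_8_0 --config ab --split test --log_outputs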
| 671 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 5_12,
}
logger = logging.get_logger(__name__)
class _UpperCAmelCase ( PreTrainedTokenizer ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict, lowerCamelCase : Any, lowerCamelCase : List[str]="<pad>", lowerCamelCase : Optional[Any]="</s>", lowerCamelCase : int="<unk>", lowerCamelCase : Optional[int]="<mask_2>", lowerCamelCase : Tuple="<mask_1>", lowerCamelCase : Any=None, lowerCamelCase : Optional[int]=103, lowerCamelCase : Optional[Dict[str, Any]] = None, **lowerCamelCase : Tuple, ):
'''simple docstring'''
lowercase__ = offset
if additional_special_tokens is not None:
            if not isinstance(lowerCamelCase, list ):
raise TypeError(
F"""additional_special_tokens should be of type {type(lowerCamelCase )}, but is"""
F""" {type(lowerCamelCase )}""" )
            additional_special_tokens_extended = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(lowerCamelCase ), self.offset - 1 )
]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
lowercase__ = additional_special_tokens_extended
else:
lowercase__ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2, self.offset )]
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCamelCase, unk_token=lowerCamelCase, mask_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token_sent=lowerCamelCase, offset=lowerCamelCase, additional_special_tokens=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase, )
lowercase__ = mask_token_sent
lowercase__ = vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase )
# add special tokens to encoder dict
lowercase__ = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1 )} )
lowercase__ = {v: k for k, v in self.encoder.items()}
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.sp_model ) + self.offset
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self : List[Any], lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
        return self.sp_model.encode(lowerCamelCase, out_type=str )
def lowercase__ ( self : List[str], lowerCamelCase : str ):
'''simple docstring'''
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowercase__ = self.sp_model.piece_to_id(lowerCamelCase )
return sp_id + self.offset
def lowercase__ ( self : Any, lowerCamelCase : int ):
'''simple docstring'''
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowercase__ = self.sp_model.IdToPiece(index - self.offset )
return token
def lowercase__ ( self : int, lowerCamelCase : List[Any] ):
'''simple docstring'''
        current_sub_tokens = []
        out_string = ''''''
        for token in lowerCamelCase:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
def lowercase__ ( self : str, lowerCamelCase : Dict=False ):
'''simple docstring'''
return 1
def lowercase__ ( self : Tuple, lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase__ ( self : Optional[Any], lowerCamelCase : List, lowerCamelCase : Optional[List] = None, lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase__ ( self : Optional[int], lowerCamelCase : Optional[int], lowerCamelCase : Dict=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase__ ( self : Union[str, Any], lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase, '''wb''' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
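# A minimal usage sketch (hedged: assumes a local SentencePiece model file;
# the class is instantiated under the name it is defined with above):
#
#   tokenizer = _UpperCAmelCase(vocab_file="spiece.model")
#   input_ids = tokenizer("Summarize this text.")["input_ids"]
#   # the sequence ends with the EOS id (1), per build_inputs_with_special_tokens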
| 719 |
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution ( lowerCamelCase_ = N ):
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , lowerCamelCase_[i : i + 13] ) )
        for i in range(len(lowerCamelCase_ ) - 12 ) )
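# For the 1000-digit constant above, the greatest product of 13 adjacent
# digits is 23514624000 (the known Project Euler #8 result).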
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 0 |
import re
def split_input ( lowerCamelCase_ ):
    '''simple docstring'''
    return [char.split() for char in re.split(r'''[^ a-z A-Z 0-9 \s]''' , lowerCamelCase_ )]
def to_simple_case ( lowerCamelCase_ ):
    '''simple docstring'''
    string_split = split_input(lowerCamelCase_ )
    return "".join(
        [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case ( text , upper , separator ):
    '''simple docstring'''
    try:
        string_split = split_input(text )
        if upper:
            res_str = ''''''.join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = ''''''.join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def a ( lowerCamelCase_ ):
    '''simple docstring'''
    return to_simple_case(lowerCamelCase_ )
def a ( lowerCamelCase_ ):
    '''simple docstring'''
    try:
        res_str = to_simple_case(lowerCamelCase_ )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def a ( text , upper ):
    '''simple docstring'''
    return to_complex_case(text , upper , '''_''' )
def a ( text , upper ):
    '''simple docstring'''
    return to_complex_case(text , upper , '''-''' )
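# A few examples under the restored helper names:
#   to_simple_case("hello world")              -> "HelloWorld"
#   to_complex_case("hello world", True, "_")  -> "HELLO_WORLD"
#   to_complex_case("hello world", False, "-") -> "hello-world"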
if __name__ == "__main__":
__import__('doctest').testmod()
| 720 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = 1
@register_to_config
def __init__( self : Union[str, Any], lowerCamelCase : int = 2_000, lowerCamelCase : float = 0.15, lowerCamelCase : float = 0.01, lowerCamelCase : float = 1348.0, lowerCamelCase : float = 1E-5, lowerCamelCase : int = 1, ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ = sigma_max
# setable values
lowercase__ = None
self.set_sigmas(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
return sample
def lowercase__ ( self : Dict, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : Union[str, torch.device] = None ):
'''simple docstring'''
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowercase__ = torch.linspace(1, lowerCamelCase, lowerCamelCase, device=lowerCamelCase )
def lowercase__ ( self : str, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : float = None, lowerCamelCase : float = None ):
'''simple docstring'''
lowercase__ = sigma_min if sigma_min is not None else self.config.sigma_min
lowercase__ = sigma_max if sigma_max is not None else self.config.sigma_max
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCamelCase, lowerCamelCase )
lowercase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowercase__ = torch.exp(torch.linspace(math.log(lowerCamelCase ), math.log(lowerCamelCase ), lowerCamelCase ) )
lowercase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : str ):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def lowercase__ ( self : Tuple, lowerCamelCase : torch.FloatTensor, lowerCamelCase : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowercase__ = timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowercase__ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowercase__ = timesteps.to(self.discrete_sigmas.device )
lowercase__ = self.discrete_sigmas[timesteps].to(sample.device )
lowercase__ = self.get_adjacent_sigma(lowerCamelCase, lowerCamelCase ).to(sample.device )
lowercase__ = torch.zeros_like(lowerCamelCase )
lowercase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowercase__ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowercase__ = diffusion.unsqueeze(-1 )
lowercase__ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of the SDE
lowercase__ = randn_tensor(
sample.shape, layout=sample.layout, generator=lowerCamelCase, device=sample.device, dtype=sample.dtype )
lowercase__ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowercase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCamelCase, prev_sample_mean=lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowercase__ = randn_tensor(sample.shape, layout=sample.layout, generator=lowerCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowercase__ = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowercase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowercase__ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowercase__ = step_size.unsqueeze(-1 )
lowercase__ = sample + step_size * model_output
lowercase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, ):
'''simple docstring'''
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowercase__ = timesteps.to(original_samples.device )
lowercase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowercase__ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCamelCase ) * sigmas[:, None, None, None]
)
lowercase__ = noise + original_samples
return noisy_samples
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 671 | 0 |
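# Illustrative sketch: the predictor update above (named `step_pred` in the
# de-obfuscated original), rewritten with readable names. This is a minimal
# stand-in for the SDE-VE (variance-exploding) reverse-time Euler-Maruyama
# step, not the scheduler's full API; `sigma` / `adjacent_sigma` stand for
# consecutive entries of `discrete_sigmas`.
import torch

def sde_ve_predictor_step(score, sample, sigma, adjacent_sigma, generator=None):
    # diffusion coefficient g(t) = sqrt(sigma_t^2 - sigma_{t-1}^2)
    diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
    # equation 6: the drift is -g(t)^2 * grad_x log p_t(x)
    drift = -(diffusion**2) * score
    noise = torch.randn(sample.shape, generator=generator, dtype=sample.dtype)
    prev_sample_mean = sample - drift  # subtract because dt is a small negative step
    prev_sample = prev_sample_mean + diffusion * noise
    return prev_sample, prev_sample_mean

# Toy usage with a zero "score network" on random data:
x = torch.randn(2, 3, 8, 8)
x_prev, x_mean = sde_ve_predictor_step(torch.zeros_like(x), x, sigma=10.0, adjacent_sigma=9.0)
print(x_prev.shape)  # torch.Size([2, 3, 8, 8])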
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def lowercase__ ( self : str, lowerCamelCase : float ):
'''simple docstring'''
return 0.0
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
lowercase__ = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 512
lowercase__ = [1] + [0] * (size - 1)
lowercase__ = [filter_type.process(item ) for item in inputs]
lowercase__ = [0] * (samplerate - size) # zero-padding
outputs += filler
lowercase__ = np.abs(np.fft.fft(lowerCamelCase_ ) )
lowercase__ = 20 * np.logaa(lowerCamelCase_ )
# Frequencies on log scale from 24 Hz to the Nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('''Frequency (Hz)''' )
plt.xscale('''log''' )
# Display within reasonable bounds
lowercase__ = get_bounds(lowerCamelCase_ , lowerCamelCase_ )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel('''Gain (dB)''' )
plt.plot(lowerCamelCase_ )
plt.show()
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 512
lowercase__ = [1] + [0] * (size - 1)
lowercase__ = [filter_type.process(item ) for item in inputs]
lowercase__ = [0] * (samplerate - size) # zero-padding
outputs += filler
lowercase__ = np.angle(np.fft.fft(lowerCamelCase_ ) )
# Frequencies on log scale from 24 Hz to the Nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('''Frequency (Hz)''' )
plt.xscale('''log''' )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel('''Phase shift (Radians)''' )
plt.plot(np.unwrap(lowerCamelCase_ , -2 * pi ) )
plt.show()
| 721 |
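# The two plotting helpers above (frequency response and phase response in
# the de-obfuscated original) accept any object exposing a
# `process(sample: float) -> float` method, per the Protocol class at the top
# of the file. A minimal one-pole low-pass filter satisfying that protocol;
# the coefficient value is an illustrative assumption.
class OnePoleLowPass:
    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self._state = 0.0

    def process(self, sample: float) -> float:
        # y[n] = y[n-1] + alpha * (x[n] - y[n-1])
        self._state += self.alpha * (sample - self._state)
        return self._state

# Usage with the first helper above (hypothetical de-obfuscated name):
#   show_frequency_response(OnePoleLowPass(), 48000)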
from collections import defaultdict
from math import gcd
def a ( lowerCamelCase_ = 150_0000 ):
'''simple docstring'''
lowercase__ = defaultdict(lowerCamelCase_ )
lowercase__ = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , lowerCamelCase_ , 2 ):
if gcd(lowerCamelCase_ , lowerCamelCase_ ) > 1:
continue
lowercase__ = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(lowerCamelCase_ , limit + 1 , lowerCamelCase_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 0 |
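# Sanity check for the Euclid-formula counting above (Project Euler 75:
# perimeters admitting exactly one integer right triangle). This O(limit^2)
# brute force is only usable for tiny limits; the function name is ours.
from math import isqrt

def singular_perimeters_bruteforce(limit: int) -> int:
    counts: dict[int, int] = {}
    for a in range(3, limit):
        for b in range(a, limit):
            c_squared = a * a + b * b
            c = isqrt(c_squared)
            if c * c == c_squared and a + b + c <= limit:
                counts[a + b + c] = counts.get(a + b + c, 0) + 1
    return sum(1 for frequency in counts.values() if frequency == 1)

# Perimeter 12 has only (3, 4, 5); perimeter 120 admits three distinct
# triples, so it would not be counted.
print(singular_perimeters_bruteforce(200))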
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
lowercase__ = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''', safety_checker=lowerCamelCase, cache_dir=lowerCamelCase )
lowercase__ = [t[-1] for t in os.walk(os.path.join(lowerCamelCase, os.listdir(lowerCamelCase )[0], '''snapshots''' ) )]
lowercase__ = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''', safety_checker=lowerCamelCase )
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 4
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
lowercase__ = replicate(lowerCamelCase )
lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase )
lowercase__ = shard(lowerCamelCase )
lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1514745 ) < 1E-3
assert np.abs(np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 49947.875 ) < 5E-1
lowercase__ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCamelCase ) == num_samples
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''', revision='''flax''', safety_checker=lowerCamelCase )
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
lowercase__ = replicate(lowerCamelCase )
lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase )
lowercase__ = shard(lowerCamelCase )
lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05652401) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 2383808.2) ) < 5E-1
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa, safety_checker=lowerCamelCase )
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
lowercase__ = replicate(lowerCamelCase )
lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase )
lowercase__ = shard(lowerCamelCase )
lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa )
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
lowercase__ = replicate(lowerCamelCase )
lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase )
lowercase__ = shard(lowerCamelCase )
lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = FlaxDDIMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', set_alpha_to_one=lowerCamelCase, steps_offset=1, )
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa, scheduler=lowerCamelCase, safety_checker=lowerCamelCase, )
lowercase__ = scheduler.create_state()
lowercase__ = scheduler_state
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = 50
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
# shard inputs and rng
lowercase__ = replicate(lowerCamelCase )
lowercase__ = jax.random.split(lowerCamelCase, lowerCamelCase )
lowercase__ = shard(lowerCamelCase )
lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045043945) ) < 1E-3
assert np.abs((np.abs(lowerCamelCase, dtype=np.floataa ).sum() - 2347693.5) ) < 5E-1
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = jax.random.split(jax.random.PRNGKey(0 ), lowerCamelCase )
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa, safety_checker=lowerCamelCase, )
lowercase__ = replicate(lowerCamelCase )
lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
lowercase__ = shard(lowerCamelCase )
lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
lowercase__ = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloataa, safety_checker=lowerCamelCase, use_memory_efficient_attention=lowerCamelCase, )
lowercase__ = replicate(lowerCamelCase )
lowercase__ = pipeline.prepare_inputs(lowerCamelCase )
lowercase__ = shard(lowerCamelCase )
lowercase__ = pipeline(lowerCamelCase, lowerCamelCase, lowerCamelCase, jit=lowerCamelCase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
lowercase__ = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 700 |
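# The tests above lean on a standard Flax data-parallel pattern: replicate
# the parameters onto every device, shard the inputs along the leading axis,
# and pmap the function. A minimal self-contained sketch (the toy function
# and shapes are assumptions, not pipeline internals):
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

def scale_batch(params, batch):
    return batch * params["scale"]

params = {"scale": jnp.array(2.0)}
num_devices = jax.device_count()
inputs = jnp.arange(num_devices * 4, dtype=jnp.float32).reshape(num_devices * 4, 1)

p_params = replicate(params)  # adds a leading device axis to every leaf
p_inputs = shard(inputs)      # (num_devices, 4, 1)
out = jax.pmap(scale_batch)(p_params, p_inputs)
print(out.shape)              # (num_devices, 4, 1)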
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
A__ : Dict = logging.get_logger(__name__)
A__ : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A__ : Optional[int] = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
A__ : List[str] = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
A__ : Optional[int] = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BertTokenizer
def __init__( self : Any, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Any=None, lowerCamelCase : Tuple=True, lowerCamelCase : Dict="[UNK]", lowerCamelCase : Any="[SEP]", lowerCamelCase : List[Any]="[PAD]", lowerCamelCase : Optional[Any]="[CLS]", lowerCamelCase : Dict="[MASK]", lowerCamelCase : List[Any]=True, lowerCamelCase : Tuple=None, **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, tokenize_chinese_chars=lowerCamelCase, strip_accents=lowerCamelCase, **lowerCamelCase, )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''', lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''', lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''', lowerCamelCase ) != tokenize_chinese_chars
):
lowercase__ = getattr(lowerCamelCase, normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**lowerCamelCase )
lowercase__ = do_lower_case
def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : Dict=None ):
'''simple docstring'''
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : List[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(lowerCamelCase, name=lowerCamelCase )
return tuple(lowerCamelCase )
| 671 | 0 |
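# The pair of methods above that build special-token inputs and token type
# ids implement BERT's classic layout: [CLS] A [SEP] for single sequences,
# [CLS] A [SEP] B [SEP] with 0/1 segment ids for pairs. A toy illustration;
# the word ids below are hypothetical stand-ins (101/102 are the usual
# bert-base-uncased [CLS]/[SEP] ids):
cls_id, sep_id = 101, 102
tokens_a = [7592, 2088]        # e.g. "hello world"
tokens_b = [2129, 2024, 2017]  # e.g. "how are you"

with_special = [cls_id] + tokens_a + [sep_id] + tokens_b + [sep_id]
segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
assert len(with_special) == len(segment_ids)
print(with_special)  # [101, 7592, 2088, 102, 2129, 2024, 2017, 102]
print(segment_ids)   # [0, 0, 0, 0, 1, 1, 1, 1]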
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A__ : str = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
A__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 701 |
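# Both this file and the UniSpeech file that follows use transformers'
# `_LazyModule` to defer heavy imports until first attribute access. A
# simplified stand-in for that mechanism (not the real implementation):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache: __getattr__ only fires on misses
        return value

# usage inside a package __init__.py:
#   sys.modules[__name__] = LazyModule(__name__, _import_structure)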
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A__ : Any = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
A__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 | 0 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = TransfoXLTokenizer
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : Dict ):
'''simple docstring'''
super().setUp()
lowercase__ = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : List[Any], **lowerCamelCase : Any ):
'''simple docstring'''
lowercase__ = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : Any, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''<unk> UNwanted , running'''
lowercase__ = '''<unk> unwanted, running'''
return input_text, output_text
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=lowerCamelCase )
lowercase__ = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(lowerCamelCase, ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), [0, 4, 8, 7] )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = TransfoXLTokenizer(lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ), ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = TransfoXLTokenizer(lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ), ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = TransfoXLTokenizer(lower_case=lowerCamelCase )
lowercase__ = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
lowercase__ = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(lowerCamelCase ), lowerCamelCase )
self.assertEqual(tokenizer.convert_tokens_to_string(lowerCamelCase ), lowerCamelCase )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = len(lowerCamelCase )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''', 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(lowerCamelCase ), original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ), [1] )
self.assertEqual(tokenizer.decode([1] ), '''new1''' )
| 702 |
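# The @-@ / @,@ / @.@ markers asserted in the test above follow the WikiText
# convention TransfoXL was trained on: hyphens and digit separators become
# standalone tokens. Detokenization just fuses them back; a minimal sketch
# (function name is ours):
def wikitext_detokenize(tokens: list) -> str:
    text = " ".join(tokens)
    for marker, char in ((" @-@ ", "-"), (" @,@ ", ","), (" @.@ ", ".")):
        text = text.replace(marker, char)
    return text

print(wikitext_detokenize(["$", "5", "@,@", "000"]))      # $ 5,000
print(wikitext_detokenize(["side", "@-@", "scrolled"]))   # side-scrolled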
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
A__ : Dict = 50_00_00
A__ , A__ : str = os.path.split(__file__)
A__ : Optional[Any] = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.map(**lowerCamelCase_ )
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.filter(**lowerCamelCase_ )
def a ( ):
'''simple docstring'''
lowercase__ = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
lowercase__ = generate_example_dataset(
os.path.join(lowerCamelCase_ , '''dataset.arrow''' ) , lowerCamelCase_ , num_examples=lowerCamelCase_ )
lowercase__ = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowerCamelCase_ )
def tokenize(lowerCamelCase_ ):
return tokenizer(examples['''text'''] )
lowercase__ = map(lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''numpy''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''pandas''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = filter(lowerCamelCase_ )
# Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowerCamelCase_ , '''wb''' ) as f:
f.write(json.dumps(lowerCamelCase_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 671 | 0 |
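# `get_duration` above comes from the benchmark's local `utils` module. A
# plausible minimal implementation (an assumption, not the actual source)
# returns the wall-clock time of the wrapped call:
import functools
import time

def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start
    return wrapper

@get_duration
def busy(n: int) -> None:
    sum(range(n))

print(f"{busy(1_000_000):.4f}s")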
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = None
lowercase__ = None
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = True
lowercase__ = None
lowercase__ = 1
lowercase__ = None
lowercase__ = False
lowercase__ = None
lowercase__ = None
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(lowerCamelCase ) for k, v in self.__dict__.items()} )
| 703 |
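# The final method above clones the dataclass by deep-copying every field
# into a fresh instance, so mutable defaults are never shared between
# copies. A toy equivalent of that pattern:
import copy
from dataclasses import dataclass, field

@dataclass
class Options:
    tags: list = field(default_factory=list)

    def clone(self) -> "Options":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

a = Options(tags=["fast"])
b = a.clone()
b.tags.append("safe")
print(a.tags, b.tags)  # ['fast'] ['fast', 'safe'] -- no shared state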
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : str = "", lowerCamelCase : bool = False ):
'''simple docstring'''
# Mapping from the first character of the prefix of the node
lowercase__ = {}
# A node will be a leaf if the tree contains its word
lowercase__ = is_leaf
lowercase__ = prefix
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = 0
for q, w in zip(self.prefix, lowerCamelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowercase__ ( self : Optional[int], lowerCamelCase : list[str] ):
'''simple docstring'''
for word in words:
self.insert(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
lowercase__ = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowercase__ = RadixNode(prefix=lowerCamelCase, is_leaf=lowerCamelCase )
else:
lowercase__ = self.nodes[word[0]]
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCamelCase )
# Case 4: The word is greater than or equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowercase__ = remaining_prefix
lowercase__ = self.nodes[matching_string[0]]
lowercase__ = RadixNode(lowerCamelCase, lowerCamelCase )
lowercase__ = aux_node
if remaining_word == "":
lowercase__ = True
else:
self.nodes[matching_string[0]].insert(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.nodes.get(word[0], lowerCamelCase )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCamelCase )
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.nodes.get(word[0], lowerCamelCase )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCamelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowercase__ = list(self.nodes.values() )[0]
lowercase__ = merging_node.is_leaf
self.prefix += merging_node.prefix
lowercase__ = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowercase__ = False
# If there is 1 edge, we merge it with its child
else:
lowercase__ = list(incoming_node.nodes.values() )[0]
lowercase__ = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowercase__ = merging_node.nodes
return True
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int = 0 ):
'''simple docstring'''
if self.prefix != "":
print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def a ( ):
'''simple docstring'''
lowercase__ = '''banana bananas bandana band apple all beast'''.split()
lowercase__ = RadixNode()
root.insert_many(lowerCamelCase_ )
assert all(root.find(lowerCamelCase_ ) for word in words )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def a ( ):
'''simple docstring'''
assert test_trie()
def a ( ):
'''simple docstring'''
lowercase__ = RadixNode()
lowercase__ = '''banana bananas bandanas bandana band apple all beast'''.split()
root.insert_many(lowerCamelCase_ )
print('''Words:''' , lowerCamelCase_ )
print('''Tree:''' )
root.print_tree()
if __name__ == "__main__":
main()
| 671 | 0 |
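# The prefix-splitting helper (named `match` in the de-obfuscated original)
# is what drives the four insertion cases above. Stand-alone illustration of
# the split:
def match(prefix: str, word: str) -> tuple:
    x = 0
    for q, w in zip(prefix, word):
        if q != w:
            break
        x += 1
    return prefix[:x], prefix[x:], word[x:]

print(match("banana", "bandana"))  # ('ban', 'ana', 'dana')
# common part 'ban', remaining prefix 'ana', remaining word 'dana':
# insertion then creates an intermediate 'ban' node with children for
# 'ana' and 'dana' (case 4).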
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Tuple = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = """vit_mae"""
def __init__( self : List[Any], lowerCamelCase : Optional[Any]=768, lowerCamelCase : Dict=12, lowerCamelCase : List[str]=12, lowerCamelCase : Optional[int]=3_072, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : int=0.0, lowerCamelCase : List[str]=0.0, lowerCamelCase : Any=0.02, lowerCamelCase : Optional[int]=1E-12, lowerCamelCase : Tuple=224, lowerCamelCase : int=16, lowerCamelCase : List[str]=3, lowerCamelCase : Dict=True, lowerCamelCase : List[Any]=16, lowerCamelCase : int=512, lowerCamelCase : int=8, lowerCamelCase : List[str]=2_048, lowerCamelCase : Tuple=0.75, lowerCamelCase : Optional[int]=False, **lowerCamelCase : Optional[int], ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = qkv_bias
lowercase__ = decoder_num_attention_heads
lowercase__ = decoder_hidden_size
lowercase__ = decoder_num_hidden_layers
lowercase__ = decoder_intermediate_size
lowercase__ = mask_ratio
lowercase__ = norm_pix_loss
| 704 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = ViTImageProcessor if is_vision_available() else None
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = (3, 32, 128)
lowercase__ = tempfile.mkdtemp()
# fmt: off
lowercase__ = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
lowercase__ = dict(zip(lowerCamelCase, range(len(lowerCamelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase ) + '''\n''' )
lowercase__ = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
lowercase__ = os.path.join(self.tmpdirname, lowerCamelCase )
with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
json.dump(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : int, **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : str, **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )
lowercase__ = Image.fromarray(np.moveaxis(lowerCamelCase, 0, -1 ) )
return image_input
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
lowercase__ = self.get_image_processor(do_normalize=lowerCamelCase, padding_value=1.0 )
lowercase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=lowerCamelCase, padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(lowerCamelCase, return_tensors='''np''' )
lowercase__ = processor(images=lowerCamelCase, return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = processor(text=lowerCamelCase )
lowercase__ = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.char_decode(lowerCamelCase )
lowercase__ = tokenizer.batch_decode(lowerCamelCase )
lowercase__ = [seq.replace(''' ''', '''''' ) for seq in decoded_tok]
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = None
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = torch.randn(1, 27, 38 )
lowercase__ = torch.randn(1, 27, 50_257 )
lowercase__ = torch.randn(1, 27, 30_522 )
lowercase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ), ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 671 | 0 |
from __future__ import annotations
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if partitions <= 0:
raise ValueError('''partitions must be a positive number!''' )
if partitions > number_of_bytes:
raise ValueError('''partitions can not be greater than number_of_bytes!''' )
lowercase__ = number_of_bytes // partitions
lowercase__ = []
for i in range(lowerCamelCase_ ):
lowercase__ = i * bytes_per_partition + 1
lowercase__ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F"""{start_bytes}-{end_bytes}""" )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 |
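# Worked example for the partitioning function above (`allocation_num` in
# the de-obfuscated original): 100 bytes over 3 partitions, with the final
# partition absorbing the remainder.
bytes_per_partition = 100 // 3  # 33
starts = [i * bytes_per_partition + 1 for i in range(3)]   # [1, 34, 67]
ends = [s - 1 for s in starts[1:]] + [100]                 # [33, 66, 100]
assert [f"{s}-{e}" for s, e in zip(starts, ends)] == ["1-33", "34-66", "67-100"]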
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
lowercase__ = _modexpt(lowerCamelCase_ , exponent // 2 , lowerCamelCase_ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowerCamelCase_ , exponent - 1 , lowerCamelCase_ )) % modulo_value
def a ( lowerCamelCase_ = 1777 , lowerCamelCase_ = 1855 , lowerCamelCase_ = 8 ):
'''simple docstring'''
lowercase__ = base
for _ in range(1 , lowerCamelCase_ ):
lowercase__ = _modexpt(lowerCamelCase_ , lowerCamelCase_ , 10**digits )
return result
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 0 |
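# The recursive helper above is square-and-multiply modular exponentiation;
# restated with readable names, it agrees with Python's built-in
# three-argument pow:
def modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    return (base * modexpt(base, exponent - 1, modulo_value)) % modulo_value

assert modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)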
def a ( lowerCamelCase_ ):
'''simple docstring'''
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
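# The function above sums an integer's proper divisors, which makes
# perfect-number checks a one-liner: n is perfect iff that sum equals n.
def sum_of_proper_divisors(num: int) -> int:
    return sum(d for d in range(1, num // 2 + 1) if num % d == 0)

for candidate in (6, 28, 496):
    assert sum_of_proper_divisors(candidate) == candidate  # all perfect numbers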
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
A__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : WhisperForConditionalGeneration, lowerCamelCase : WhisperProcessor, lowerCamelCase : AutoencoderKL, lowerCamelCase : CLIPTextModel, lowerCamelCase : CLIPTokenizer, lowerCamelCase : UNetaDConditionModel, lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], lowerCamelCase : StableDiffusionSafetyChecker, lowerCamelCase : CLIPImageProcessor, ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=lowerCamelCase, speech_processor=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, unet=lowerCamelCase, scheduler=lowerCamelCase, feature_extractor=lowerCamelCase, )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
lowercase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase )
@torch.no_grad()
def __call__( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[Any]=16_000, lowerCamelCase : int = 512, lowerCamelCase : int = 512, lowerCamelCase : int = 50, lowerCamelCase : float = 7.5, lowerCamelCase : Optional[Union[str, List[str]]] = None, lowerCamelCase : Optional[int] = 1, lowerCamelCase : float = 0.0, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : Optional[torch.FloatTensor] = None, lowerCamelCase : Optional[str] = "pil", lowerCamelCase : bool = True, lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, lowerCamelCase : int = 1, **lowerCamelCase : Optional[Any], ):
'''simple docstring'''
lowercase__ = self.speech_processor.feature_extractor(
lowerCamelCase, return_tensors='''pt''', sampling_rate=lowerCamelCase ).input_features.to(self.device )
lowercase__ = self.speech_model.generate(lowerCamelCase, max_length=480_000 )
lowercase__ = self.speech_processor.tokenizer.batch_decode(lowerCamelCase, skip_special_tokens=lowerCamelCase, normalize=lowerCamelCase )[
0
]
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = 1
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = len(lowerCamelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase, lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(lowerCamelCase )}.""" )
# get prompt text embeddings
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt, lowerCamelCase, -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [''''''] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !="""
F""" {type(lowerCamelCase )}.""" )
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=lowerCamelCase, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt, lowerCamelCase, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='''cpu''', dtype=lowerCamelCase ).to(
self.device )
else:
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase )
# predict the noise residual
lowercase__ = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = 1 / 0.18215 * latents
lowercase__ = self.vae.decode(lowerCamelCase ).sample
lowercase__ = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase )
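# A minimal standalone sketch of the classifier-free-guidance step used in the
# pipeline above, with dummy tensors standing in for real UNet outputs (an
# assumption for illustration only):
import torch

noise_pred_uncond = torch.zeros(1, 4, 8, 8)  # prediction for the empty ("") prompt
noise_pred_text = torch.ones(1, 4, 8, 8)     # prediction for the actual prompt
guidance_scale = 7.5
# push the prediction away from the unconditional direction, scaled by `w`
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert torch.allclose(guided, torch.full((1, 4, 8, 8), 7.5))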
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[Any] = logging.get_logger(__name__)
A__ : List[Any] = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = """unispeech-sat"""
def __init__( self : Optional[int], lowerCamelCase : Any=32, lowerCamelCase : Optional[Any]=768, lowerCamelCase : List[str]=12, lowerCamelCase : List[Any]=12, lowerCamelCase : Optional[int]=3_072, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : str=0.1, lowerCamelCase : Union[str, Any]=0.1, lowerCamelCase : int=0.1, lowerCamelCase : int=0.0, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=0.1, lowerCamelCase : str=0.02, lowerCamelCase : Optional[Any]=1E-5, lowerCamelCase : Tuple="group", lowerCamelCase : List[str]="gelu", lowerCamelCase : int=(512, 512, 512, 512, 512, 512, 512), lowerCamelCase : Optional[Any]=(5, 2, 2, 2, 2, 2, 2), lowerCamelCase : Optional[Any]=(10, 3, 3, 3, 3, 2, 2), lowerCamelCase : Optional[int]=False, lowerCamelCase : Optional[Any]=128, lowerCamelCase : Optional[int]=16, lowerCamelCase : Optional[int]=False, lowerCamelCase : Dict=True, lowerCamelCase : Any=0.05, lowerCamelCase : Optional[Any]=10, lowerCamelCase : Any=2, lowerCamelCase : Dict=0.0, lowerCamelCase : Union[str, Any]=10, lowerCamelCase : Dict=0, lowerCamelCase : List[Any]=320, lowerCamelCase : Any=2, lowerCamelCase : List[Any]=0.1, lowerCamelCase : Any=100, lowerCamelCase : int=256, lowerCamelCase : Tuple=256, lowerCamelCase : Any=0.1, lowerCamelCase : Dict="mean", lowerCamelCase : Any=False, lowerCamelCase : Dict=False, lowerCamelCase : Optional[int]=256, lowerCamelCase : Optional[int]=(512, 512, 512, 512, 1_500), lowerCamelCase : Optional[Any]=(5, 3, 3, 1, 1), lowerCamelCase : Union[str, Any]=(1, 2, 3, 1, 1), lowerCamelCase : List[str]=512, lowerCamelCase : Optional[Any]=0, lowerCamelCase : Optional[int]=1, lowerCamelCase : int=2, lowerCamelCase : int=504, **lowerCamelCase : str, ):
'''simple docstring'''
super().__init__(**lowerCamelCase, pad_token_id=lowerCamelCase, bos_token_id=lowerCamelCase, eos_token_id=lowerCamelCase )
lowercase__ = hidden_size
lowercase__ = feat_extract_norm
lowercase__ = feat_extract_activation
lowercase__ = list(lowerCamelCase )
lowercase__ = list(lowerCamelCase )
lowercase__ = list(lowerCamelCase )
lowercase__ = conv_bias
lowercase__ = num_conv_pos_embeddings
lowercase__ = num_conv_pos_embedding_groups
lowercase__ = len(self.conv_dim )
lowercase__ = num_hidden_layers
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = feat_proj_dropout
lowercase__ = final_dropout
lowercase__ = layerdrop
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = vocab_size
lowercase__ = num_clusters
lowercase__ = do_stable_layer_norm
lowercase__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ = apply_spec_augment
lowercase__ = mask_time_prob
lowercase__ = mask_time_length
lowercase__ = mask_time_min_masks
lowercase__ = mask_feature_prob
lowercase__ = mask_feature_length
lowercase__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__ = num_codevectors_per_group
lowercase__ = num_codevector_groups
lowercase__ = contrastive_logits_temperature
lowercase__ = feat_quantizer_dropout
lowercase__ = num_negatives
lowercase__ = codevector_dim
lowercase__ = proj_codevector_dim
lowercase__ = diversity_loss_weight
# ctc loss
lowercase__ = ctc_loss_reduction
lowercase__ = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase__ = list(lowerCamelCase )
lowercase__ = list(lowerCamelCase )
lowercase__ = list(lowerCamelCase )
lowercase__ = xvector_output_dim
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
        return functools.reduce(operator.mul, self.conv_stride, 1 )
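# A quick check of the property above, assuming the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2): the product is the encoder's overall downsampling
# factor, i.e. one output frame per 320 input samples.
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320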
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """simple docstring"""
    destination_vertex: int
    weight: int
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : str, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = [[] for _ in range(lowerCamelCase )]
lowercase__ = size
def __getitem__( self : Optional[Any], lowerCamelCase : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return self._size
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(lowerCamelCase, lowerCamelCase ) )
def lowercase__ ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = deque([start_vertex] )
lowercase__ = [None] * self.size
lowercase__ = 0
while queue:
lowercase__ = queue.popleft()
lowercase__ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowercase__ = current_distance + edge.weight
lowercase__ = distances[edge.destination_vertex]
if (
isinstance(lowerCamelCase, lowerCamelCase )
and new_distance >= dest_vertex_distance
):
continue
lowercase__ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
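# A compact, self-contained restatement of the 0-1 BFS idea implemented above
# (a hypothetical helper, not part of the class): weight-0 edges go to the
# front of the deque and weight-1 edges to the back, so vertices leave the
# deque in non-decreasing distance order, like Dijkstra but in O(V + E).
from collections import deque


def zero_one_bfs(adjacency, start):
    distances = [float("inf")] * len(adjacency)
    distances[start] = 0
    queue = deque([start])
    while queue:
        vertex = queue.popleft()
        for destination, weight in adjacency[vertex]:
            if distances[vertex] + weight < distances[destination]:
                distances[destination] = distances[vertex] + weight
                if weight == 0:
                    queue.appendleft(destination)
                else:
                    queue.append(destination)
    return distances


# 0 -(0)-> 1 -(1)-> 2 plus a direct 0 -(1)-> 2: vertex 2 is reachable at cost 1.
assert zero_one_bfs([[(1, 0), (2, 1)], [(2, 1)], []], start=0) == [0, 0, 1]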
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A__ : List[str] = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    A__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
        # the input arrives as a comma-separated string, so split it into a list
lowercase__ = arr.split(''',''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = [int(self.array[0] )] * len(self.array )
lowercase__ = [int(self.array[0] )] * len(self.array )
for i in range(1, len(self.array ) ):
lowercase__ = max(
int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
lowercase__ = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
A__ : Dict = input('please input some numbers:')
A__ : Union[str, Any] = SubArray(whole_array)
A__ : int = array.solve_sub_array()
    print(('the result is:', re))
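# A worked example of the logic above, written against the original
# SubArray / solve_sub_array names that the __main__ block references
# (the class definition itself carries obfuscated names, so this is shown
# commented out): for the input '1,2,-5,3,4' the best contiguous sum is 3 + 4 = 7.
# assert SubArray('1,2,-5,3,4').solve_sub_array() == 7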
from __future__ import annotations
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
if resistor <= 0:
lowercase__ = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(lowerCamelCase_ )
first_sum += 1 / float(lowerCamelCase_ )
index += 1
return 1 / first_sum
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase__ = F"""Resistor at index {index} has a negative value!"""
raise ValueError(lowerCamelCase_ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
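# Worked example of the two formulas above, shown as plain arithmetic because
# both helpers share the obfuscated name `a`: resistors of 2, 4 and 4 ohms give
# 1 / (1/2 + 1/4 + 1/4) = 1.0 ohm in parallel and 2 + 4 + 4 = 10 ohms in series.
assert 1 / (1 / 2 + 1 / 4 + 1 / 4) == 1.0
assert 2 + 4 + 4 == 10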
from itertools import count
def a ( lowerCamelCase_ = 50 ):
    '''
    Project Euler 115: return the smallest row length n for which the number of
    ways to fill a row with blocks of length >= min_block_length (any two blocks
    separated by at least one empty cell) first exceeds one million.
    '''
    fill_count_functions = [1] * lowerCamelCase_
for n in count(lowerCamelCase_ ):
fill_count_functions.append(1 )
for block_length in range(lowerCamelCase_ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
break
return n
if __name__ == "__main__":
print(F"{solution() = }")
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
A__ : Any = logging.get_logger(__name__)
A__ : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A__ : List[Any] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A__ : Optional[int] = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
A__ : Optional[Any] = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
A__ : str = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
A__ : List[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
A__ : Union[str, Any] = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
A__ : int = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
A__ : str = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
A__ : int = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
A__ : List[Any] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
A__ : int = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
A__ : int = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(A__ )
class _UpperCAmelCase :
"""simple docstring"""
def __call__( self : int, lowerCamelCase : Tuple, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[str] = None, lowerCamelCase : Union[bool, str] = False, lowerCamelCase : Union[bool, str] = False, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, lowerCamelCase : Optional[bool] = None, **lowerCamelCase : List[str], ):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
elif titles is None or texts is None:
lowercase__ = titles if texts is None else texts
return super().__call__(
lowerCamelCase, lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
lowercase__ = titles if not isinstance(lowerCamelCase, lowerCamelCase ) else [titles]
lowercase__ = texts if not isinstance(lowerCamelCase, lowerCamelCase ) else [texts]
lowercase__ = len(lowerCamelCase )
lowercase__ = questions if not isinstance(lowerCamelCase, lowerCamelCase ) else [questions] * n_passages
if len(lowerCamelCase ) != len(lowerCamelCase ):
raise ValueError(
F"""There should be as many titles than texts but got {len(lowerCamelCase )} titles and {len(lowerCamelCase )} texts.""" )
lowercase__ = super().__call__(lowerCamelCase, lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase )['''input_ids''']
lowercase__ = super().__call__(lowerCamelCase, add_special_tokens=lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase )['''input_ids''']
lowercase__ = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase, lowerCamelCase )
]
}
if return_attention_mask is not False:
lowercase__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowercase__ = attention_mask
return self.pad(lowerCamelCase, padding=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase )
def lowercase__ ( self : Tuple, lowerCamelCase : BatchEncoding, lowerCamelCase : DPRReaderOutput, lowerCamelCase : int = 16, lowerCamelCase : int = 64, lowerCamelCase : int = 4, ):
'''simple docstring'''
lowercase__ = reader_input['''input_ids''']
lowercase__ , lowercase__ , lowercase__ = reader_output[:3]
lowercase__ = len(lowerCamelCase )
lowercase__ = sorted(range(lowerCamelCase ), reverse=lowerCamelCase, key=relevance_logits.__getitem__ )
lowercase__ = []
for doc_id in sorted_docs:
lowercase__ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowercase__ = sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase__ = sequence_ids.index(self.pad_token_id )
else:
lowercase__ = len(lowerCamelCase )
lowercase__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCamelCase, top_spans=lowerCamelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCamelCase, start_index=lowerCamelCase, end_index=lowerCamelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
if len(lowerCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[int], lowerCamelCase : List[int], lowerCamelCase : int, lowerCamelCase : int, ):
'''simple docstring'''
lowercase__ = []
for start_index, start_score in enumerate(lowerCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        lowercase__ = sorted(lowerCamelCase, key=lambda x : x[1], reverse=lowerCamelCase )
lowercase__ = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
lowercase__ = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(A__ )
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = READER_PRETRAINED_VOCAB_FILES_MAP
lowercase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = READER_PRETRAINED_INIT_CONFIGURATION
lowercase__ = ["""input_ids""", """attention_mask"""]
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A__ : Tuple = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[int], lowerCamelCase : int=64, lowerCamelCase : Union[str, Any]=48_000, lowerCamelCase : str=480, lowerCamelCase : Tuple=10, lowerCamelCase : List[Any]=1_024, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[Any]=False, lowerCamelCase : float = 0, lowerCamelCase : float = 14_000, lowerCamelCase : int = None, lowerCamelCase : str = "fusion", lowerCamelCase : str = "repeatpad", **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase, sampling_rate=lowerCamelCase, padding_value=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
lowercase__ = top_db
lowercase__ = truncation
lowercase__ = padding
lowercase__ = fft_window_size
lowercase__ = (fft_window_size >> 1) + 1
lowercase__ = hop_length
lowercase__ = max_length_s
lowercase__ = max_length_s * sampling_rate
lowercase__ = sampling_rate
lowercase__ = frequency_min
lowercase__ = frequency_max
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm=lowerCamelCase, mel_scale='''htk''', )
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm='''slaney''', mel_scale='''slaney''', )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase__ ( self : Optional[int], lowerCamelCase : np.array, lowerCamelCase : Optional[np.array] = None ):
'''simple docstring'''
lowercase__ = spectrogram(
lowerCamelCase, window_function(self.fft_window_size, '''hann''' ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=lowerCamelCase, log_mel='''dB''', )
return log_mel_spectrogram.T
def lowercase__ ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
# randomly choose index for each part
lowercase__ = np.random.choice(ranges[0] )
lowercase__ = np.random.choice(ranges[1] )
lowercase__ = np.random.choice(ranges[2] )
lowercase__ = mel[idx_front : idx_front + chunk_frames, :]
lowercase__ = mel[idx_middle : idx_middle + chunk_frames, :]
lowercase__ = mel[idx_back : idx_back + chunk_frames, :]
lowercase__ = torch.tensor(mel[None, None, :] )
lowercase__ = torch.nn.functional.interpolate(
lowerCamelCase, size=[chunk_frames, 64], mode='''bilinear''', align_corners=lowerCamelCase )
lowercase__ = mel_shrink[0][0].numpy()
lowercase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
def lowercase__ ( self : List[str], lowerCamelCase : np.array, lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase__ = len(lowerCamelCase ) - max_length
lowercase__ = np.random.randint(0, overflow + 1 )
lowercase__ = waveform[idx : idx + max_length]
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
                lowercase__ = max_length // self.hop_length + 1 # the +1 comes from how the spectrogram is computed
lowercase__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase__ = np.stack([mel, mel, mel, mel], axis=0 )
lowercase__ = False
else:
lowercase__ = self._random_mel_fusion(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
lowercase__ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, lowerCamelCase ) )
lowercase__ = np.pad(lowerCamelCase, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0 )
if truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
else:
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any], lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], lowerCamelCase : str = None, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, **lowerCamelCase : List[str], ):
'''simple docstring'''
lowercase__ = truncation if truncation is not None else self.truncation
lowercase__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase__ = isinstance(lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase, np.ndarray ):
lowercase__ = np.asarray(lowerCamelCase, dtype=np.floataa )
elif isinstance(lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray(lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
lowercase__ = [
self._get_input_mel(lowerCamelCase, max_length if max_length else self.nb_max_samples, lowerCamelCase, lowerCamelCase )
for waveform in raw_speech
]
lowercase__ = []
lowercase__ = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase )
is_longer.append(lowerCamelCase )
if truncation == "fusion" and sum(lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowercase__ = np.random.randint(0, len(lowerCamelCase ) )
lowercase__ = True
if isinstance(input_mel[0], lowerCamelCase ):
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowercase__ = [[longer] for longer in is_longer]
lowercase__ = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowercase__ = BatchFeature(lowerCamelCase )
if return_tensors is not None:
lowercase__ = input_features.convert_to_tensors(lowerCamelCase )
return input_features
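# A standalone sketch of the "repeatpad" branch above, assuming a 1-D waveform
# shorter than max_length: the signal is tiled whole, then zero-padded out to
# the target length.
import numpy as np

wave = np.array([1.0, 2.0, 3.0])
max_length = 8
n_repeat = int(max_length / len(wave))  # two full repeats fit
tiled = np.tile(wave, n_repeat)         # [1. 2. 3. 1. 2. 3.]
padded = np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)
assert padded.tolist() == [1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 0.0, 0.0]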
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Dict = logging.get_logger(__name__)
A__ : Any = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = """roberta"""
def __init__( self : str, lowerCamelCase : Dict=50_265, lowerCamelCase : int=768, lowerCamelCase : Optional[int]=12, lowerCamelCase : Dict=12, lowerCamelCase : Optional[Any]=3_072, lowerCamelCase : Union[str, Any]="gelu", lowerCamelCase : Optional[Any]=0.1, lowerCamelCase : Dict=0.1, lowerCamelCase : str=512, lowerCamelCase : str=2, lowerCamelCase : int=0.02, lowerCamelCase : Any=1E-12, lowerCamelCase : int=1, lowerCamelCase : List[str]=0, lowerCamelCase : Union[str, Any]=2, lowerCamelCase : Any="absolute", lowerCamelCase : Dict=True, lowerCamelCase : Union[str, Any]=None, **lowerCamelCase : List[str], ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase, bos_token_id=lowerCamelCase, eos_token_id=lowerCamelCase, **lowerCamelCase )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = classifier_dropout
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
@property
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
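# A sketch of how the dynamic-axes mapping built above is typically consumed at
# export time (hypothetical model and path; `torch.onnx.export` accepts a
# `dynamic_axes` dict keyed by input name):
#
#   torch.onnx.export(
#       model,
#       (input_ids, attention_mask),
#       "roberta.onnx",
#       input_names=["input_ids", "attention_mask"],
#       dynamic_axes={
#           "input_ids": {0: "batch", 1: "sequence"},
#           "attention_mask": {0: "batch", 1: "sequence"},
#       },
#   )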
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """simple docstring"""
    data: Any
    left: Node | None = None
    right: Node | None = None
def a ( ):
    '''simple docstring'''
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ ):
'''simple docstring'''
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(lowerCamelCase_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 0
return output
def a ( ): # Main function for testing.
'''simple docstring'''
lowercase__ = make_tree()
print(F"""In-order Traversal: {inorder(lowerCamelCase_ )}""" )
print(F"""Pre-order Traversal: {preorder(lowerCamelCase_ )}""" )
print(F"""Post-order Traversal: {postorder(lowerCamelCase_ )}""" , '''\n''' )
print(F"""Height of Tree: {height(lowerCamelCase_ )}""" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(lowerCamelCase_ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(lowerCamelCase_ ) + 1 ):
print(F"""Level {level}:""" , get_nodes_from_left_to_right(lowerCamelCase_ , level=lowerCamelCase_ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
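# For the five-node tree built above (1 at the root, 2 and 3 below it, 4 and 5
# under 2), the traversals yield:
#   preorder    -> [1, 2, 4, 5, 3]
#   inorder     -> [4, 2, 5, 1, 3]
#   postorder   -> [4, 5, 2, 3, 1]
#   level order -> [1, 2, 3, 4, 5]
#   zigzag      -> [[1], [3, 2], [4, 5]]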
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = DistilBertTokenizer
lowercase__ = DistilBertTokenizerFast
lowercase__ = True
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
lowercase__ = tokenizer.encode('''sequence builders''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''multi-sequence build''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('''RGB''' )
lowercase__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
lowercase__ = transform(lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
return image
def a ( lowerCamelCase_ ):
'''simple docstring'''
if "visual_encoder" in key:
lowercase__ = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , lowerCamelCase_ )
if "blocks" in key:
lowercase__ = re.sub(r'''blocks''' , '''layers''' , lowerCamelCase_ )
if "attn" in key:
lowercase__ = re.sub(r'''attn''' , '''self_attn''' , lowerCamelCase_ )
if "norm1" in key:
lowercase__ = re.sub(r'''norm1''' , '''layer_norm1''' , lowerCamelCase_ )
if "norm2" in key:
lowercase__ = re.sub(r'''norm2''' , '''layer_norm2''' , lowerCamelCase_ )
if "encoder.norm" in key:
lowercase__ = re.sub(r'''encoder.norm''' , '''post_layernorm''' , lowerCamelCase_ )
if "encoder.patch_embed.proj" in key:
lowercase__ = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , lowerCamelCase_ )
if "encoder.pos_embed" in key:
lowercase__ = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , lowerCamelCase_ )
if "encoder.cls_token" in key:
lowercase__ = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , lowerCamelCase_ )
if "self_attn" in key:
lowercase__ = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , lowerCamelCase_ )
return key
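# One hypothetical checkpoint key traced through rename_key, showing the
# combined effect of the substitutions above:
#   'visual_encoder.blocks.0.attn.proj.weight'
#       -> 'vision_model.encoder.layers.0.self_attn.projection.weight'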
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
if config_path is not None:
lowercase__ = BlipConfig.from_pretrained(lowerCamelCase_ )
else:
lowercase__ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowercase__ = BlipForConditionalGeneration(lowerCamelCase_ ).eval()
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase__ = blip_decoder(pretrained=lowerCamelCase_ , image_size=384 , vit='''base''' )
lowercase__ = pt_model.eval()
lowercase__ = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
hf_model.load_state_dict(lowerCamelCase_ )
lowercase__ = 384
lowercase__ = load_demo_image(image_size=lowerCamelCase_ , device='''cpu''' )
lowercase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase__ = tokenizer(['''a picture of'''] ).input_ids
lowercase__ = hf_model.generate(lowerCamelCase_ , lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase__ = hf_model.generate(lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCamelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase__ = blip_vqa(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
vqa_model.eval()
lowercase__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForQuestionAnswering(lowerCamelCase_ )
hf_vqa_model.load_state_dict(lowerCamelCase_ )
lowercase__ = ['''How many dogs are in this image?''']
lowercase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' ).input_ids
lowercase__ = hf_vqa_model.generate(lowerCamelCase_ , lowerCamelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase__ = blip_itm(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
itm_model.eval()
lowercase__ = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForImageTextRetrieval(lowerCamelCase_ )
lowercase__ = ['''A picture of a woman with a dog sitting in a beach''']
lowercase__ = tokenizer(
lowerCamelCase_ , return_tensors='''pt''' , padding='''max_length''' , truncation=lowerCamelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCamelCase_ )
hf_itm_model.eval()
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
A__ : List[Any] = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 671 | 0 |
from __future__ import annotations
from typing import Any
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Tuple, lowerCamelCase : int = 6 ):
'''simple docstring'''
lowercase__ = None
lowercase__ = None
self.create_linked_list(lowerCamelCase )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = Node()
lowercase__ = current_node
lowercase__ = current_node
lowercase__ = current_node
for _ in range(1, lowerCamelCase ):
lowercase__ = Node()
lowercase__ = current_node
lowercase__ = previous_node
lowercase__ = current_node
lowercase__ = self.front
lowercase__ = previous_node
def lowercase__ ( self : int ):
'''simple docstring'''
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
self.check_can_perform_operation()
return self.front.data if self.front else None
def lowercase__ ( self : Dict, lowerCamelCase : Any ):
'''simple docstring'''
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
lowercase__ = self.rear.next
if self.rear:
lowercase__ = data
def lowercase__ ( self : Tuple ):
'''simple docstring'''
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
lowercase__ = self.front.data
lowercase__ = None
return data
lowercase__ = self.front
lowercase__ = old_front.next
lowercase__ = old_front.data
lowercase__ = None
return data
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
if self.is_empty():
raise Exception('''Empty Queue''' )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
if self.rear and self.rear.next == self.front:
raise Exception('''Full Queue''' )
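# Note (added annotation, not in the original module): this class implements a
# fixed-capacity circular queue on a doubly linked ring. All nodes are created
# up front in create_linked_list; enqueueing advances `rear` and overwrites
# that node's data, dequeueing reads the value at `front` and advances it, so
# no allocation happens after construction.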
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = None
lowercase__ = None
lowercase__ = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : int=30, lowerCamelCase : Tuple=400, lowerCamelCase : Any=True, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''shortest_edge''': 20}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def lowercase__ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''crop_size''' ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Any ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
| 671 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
A__ : List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A__ : Union[str, Any] = {
'vocab_file': {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
},
'tokenizer_file': {
'unc-nlp/lxmert-base-uncased': (
'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
),
},
}
A__ : int = {
'unc-nlp/lxmert-base-uncased': 5_12,
}
A__ : List[Any] = {
'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = LxmertTokenizer
def __init__( self : str, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : Tuple=None, lowerCamelCase : Union[str, Any]=True, lowerCamelCase : int="[UNK]", lowerCamelCase : Optional[int]="[SEP]", lowerCamelCase : List[Any]="[PAD]", lowerCamelCase : Dict="[CLS]", lowerCamelCase : List[str]="[MASK]", lowerCamelCase : Union[str, Any]=True, lowerCamelCase : List[Any]=None, **lowerCamelCase : List[Any], ):
'''simple docstring'''
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, tokenize_chinese_chars=lowerCamelCase, strip_accents=lowerCamelCase, **lowerCamelCase, )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''', lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''', lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''', lowerCamelCase ) != tokenize_chinese_chars
):
lowercase__ = getattr(lowerCamelCase, normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**lowerCamelCase )
lowercase__ = do_lower_case
def lowercase__ ( self : int, lowerCamelCase : Optional[Any], lowerCamelCase : Any=None ):
'''simple docstring'''
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : int, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : int, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(lowerCamelCase, name=lowerCamelCase )
return tuple(lowerCamelCase )
| 716 |
import argparse
import os
import re
A__ : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
A__ : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
A__ : List[str] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A__ : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
A__ : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A__ : Tuple = re.compile(r'\[([^\]]+)\]')
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _re_indent.search(lowerCamelCase_ )
return "" if search is None else search.groups()[0]
def a ( lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase_ ):
index += 1
lowercase__ = ['''\n'''.join(lines[:index] )]
else:
lowercase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ = [lines[index]]
index += 1
while index < len(lowerCamelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCamelCase_ ) )
if index < len(lowerCamelCase_ ) - 1:
lowercase__ = [lines[index + 1]]
index += 1
else:
lowercase__ = []
else:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
lowercase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
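# Annotation (not in the original): the function above returns the source as a
# list of string blocks, cut at lines whose indentation equals `indent_level`;
# with prompts given, everything before `start_prompt` becomes the first block
# and everything from `end_prompt` on becomes the last.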
def a ( lowerCamelCase_ ):
'''simple docstring'''
def _inner(lowerCamelCase_ ):
return key(lowerCamelCase_ ).lower().replace('''_''' , '''''' )
return _inner
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(lowerCamelCase_ ):
return x
if key is None:
lowercase__ = noop
# Constants are all uppercase, they go first.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ )[0].isupper() and not key(lowerCamelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ = [obj for obj in objects if not key(lowerCamelCase_ )[0].isupper()]
lowercase__ = ignore_underscore(lowerCamelCase_ )
return sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ )
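# Worked example (annotation, not in the original): objects are grouped as
# constants, then classes, then functions, each group sorted ignoring case and
# underscores, e.g.
#   ["load_tf", "BertModel", "BERT_LIST"] -> ["BERT_LIST", "BertModel", "load_tf"]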
def a ( lowerCamelCase_ ):
'''simple docstring'''
    # This inner function sorts imports between [ ].
def _replace(lowerCamelCase_ ):
lowercase__ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] ) + "]"
lowercase__ = import_statement.split('''\n''' )
if len(lowerCamelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ = 2 if lines[1].strip() == '''[''' else 1
lowercase__ = [(i, _re_strip_line.search(lowerCamelCase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowercase__ = sort_objects(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )
lowercase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
lowercase__ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] )
return "\n".join(lowerCamelCase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase__ = _re_bracket_content.sub(_replace , lowerCamelCase_ )
return import_statement
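# Worked example (annotation, not in the original): for a single-line entry the
# bracket contents are reordered in place, e.g.
#   _import_structure["models.bert"] = ["load_tf", "BertModel", "BERT_LIST"]
# becomes
#   _import_structure["models.bert"] = ["BERT_LIST", "BertModel", "load_tf"]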
def a ( lowerCamelCase_ , lowerCamelCase_=True ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase__ = split_code_in_indented_blocks(
lowerCamelCase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase__ = main_blocks[block_idx]
lowercase__ = block.split('''\n''' )
# Get to the start of the imports.
lowercase__ = 0
while line_idx < len(lowerCamelCase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase__ = len(lowerCamelCase_ )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase__ = '''\n'''.join(block_lines[line_idx:-1] )
lowercase__ = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowercase__ = split_code_in_indented_blocks(lowerCamelCase_ , indent_level=lowerCamelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase__ = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase__ = [(pattern.search(lowerCamelCase_ ).groups()[0] if pattern.search(lowerCamelCase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase__ = [(i, key) for i, key in enumerate(lowerCamelCase_ ) if key is not None]
lowercase__ = [x[0] for x in sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowercase__ = 0
lowercase__ = []
for i in range(len(lowerCamelCase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowercase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCamelCase_ )
count += 1
# And we put our main block back together with its first and last line.
lowercase__ = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCamelCase_ ) )
def a ( lowerCamelCase_=True ):
'''simple docstring'''
lowercase__ = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
lowercase__ = sort_imports(os.path.join(lowerCamelCase_ , '''__init__.py''' ) , check_only=lowerCamelCase_ )
if result:
lowercase__ = [os.path.join(lowerCamelCase_ , '''__init__.py''' )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowerCamelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A__ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 671 | 0 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = val
lowercase__ = None
lowercase__ = None
def lowercase__ ( self : Union[str, Any], lowerCamelCase : str ):
'''simple docstring'''
if self.val:
if val < self.val:
if self.left is None:
lowercase__ = Node(lowerCamelCase )
else:
self.left.insert(lowerCamelCase )
elif val > self.val:
if self.right is None:
lowercase__ = Node(lowerCamelCase )
else:
self.right.insert(lowerCamelCase )
else:
lowercase__ = val
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
# Recursive traversal
if root:
inorder(root.left , lowerCamelCase_ )
res.append(root.val )
inorder(root.right , lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
# Build BST
if len(lowerCamelCase_ ) == 0:
return arr
lowercase__ = Node(arr[0] )
for i in range(1 , len(lowerCamelCase_ ) ):
root.insert(arr[i] )
# Traverse BST in order.
lowercase__ = []
inorder(lowerCamelCase_ , lowerCamelCase_ )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 717 |
from math import sqrt
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
lowercase__ = True
# 0 and 1 are none primes.
if number <= 1:
lowercase__ = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowercase__ = False
break
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'status' must been from type bool"
return status
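# Worked example (annotation, not in the original): trial division only needs
# to check divisors up to sqrt(number), so e.g. 29 is reported prime while 30
# fails immediately on the divisor 2.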
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase__ = list(range(2 , n + 1 ) )
lowercase__ = [] # this list will be returns.
    # actual sieve of Eratosthenes
for i in range(len(lowerCamelCase_ ) ):
for j in range(i + 1 , len(lowerCamelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase__ = 0
# filters actual prime numbers.
lowercase__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
lowercase__ = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCamelCase_ ):
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and number >= 0, "'number' must been an int and >= 0"
lowercase__ = [] # this list will be returns of the function.
# potential prime number factors.
lowercase__ = 2
lowercase__ = number
if number == 0 or number == 1:
ans.append(lowerCamelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCamelCase_ ):
while quotient != 1:
if is_prime(lowerCamelCase_ ) and (quotient % factor == 0):
ans.append(lowerCamelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
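# Worked example (annotation, not in the original): the smallest factor is
# divided out repeatedly, so an input of 60 yields [2, 2, 3, 5]. Note that
# `quotient /= factor` is float division; the loop still terminates because
# `1.0 != 1` is False in Python.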
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = max(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = min(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 == 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 != 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (number > 2) and is_even(lowerCamelCase_ )
), "'number' must been an int, even and > 2"
lowercase__ = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowercase__ = get_prime_numbers(lowerCamelCase_ )
lowercase__ = len(lowerCamelCase_ )
# run variable for while-loops.
lowercase__ = 0
lowercase__ = None
# exit variable. for break up the loops
lowercase__ = True
while i < len_pn and loop:
lowercase__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (len(lowerCamelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
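# Worked example (annotation, not in the original): the nested loops return the
# first prime pair in ascending order whose sum is the input, e.g. an even
# input of 28 yields [5, 23] (2 and 3 pair with no prime in the list).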
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 0
while numbera != 0:
lowercase__ = numbera % numbera
lowercase__ = numbera
lowercase__ = rest
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = prime_factorization(lowerCamelCase_ )
elif numbera == 1 or numbera == 1:
lowercase__ = []
lowercase__ = []
lowercase__ = max(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = 0
lowercase__ = 0
lowercase__ = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(max(lowerCamelCase_ , lowerCamelCase_ ) ):
ans *= n
else:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
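# Worked example (annotation, not in the original): this computes the least
# common multiple by taking each prime with its maximum exponent, e.g. for
# 24 = 2^3 * 3 and 36 = 2^2 * 3^2 the result is 2^3 * 3^2 = 72.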
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'number' must been a positive int"
lowercase__ = 0
lowercase__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
        # if ans is not prime then
        # advance to the next prime number.
while not is_prime(lowerCamelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and is_prime(
lowerCamelCase_ ), "'ans' must been a prime number and from type int"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
is_prime(lowerCamelCase_ ) and is_prime(lowerCamelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase__ = p_number_a + 1 # jump to the next number
lowercase__ = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCamelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
    # 'ans' must not contain 'pNumber1' or 'pNumber2'!
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 1), "'n' must been int and >= 1"
lowercase__ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase__ = get_divisors(lowerCamelCase_ )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
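# Worked example (annotation, not in the original): 6 has the divisor list
# [1, 2, 3, 6]; its proper divisors sum to 1 + 2 + 3 = 6, so 6 (like 28 and
# 496) is reported as a perfect number.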
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase__ = gcd(abs(lowerCamelCase_ ) , abs(lowerCamelCase_ ) )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
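# Worked example (annotation, not in the original): dividing both parts by
# their gcd reduces e.g. (16, 24) to (2, 3), since gcd(16, 24) = 8.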
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
lowercase__ = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
lowercase__ = 0
lowercase__ = 1
lowercase__ = 1 # this will be return
for _ in range(n - 1 ):
lowercase__ = ans
ans += fiba
lowercase__ = tmp
return ans
| 671 | 0 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = tmp_path / '''file.csv'''
lowercase__ = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = tmp_path / '''malformed_file.csv'''
lowercase__ = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = tmp_path / '''csv_with_image.csv'''
lowercase__ = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = tmp_path / '''csv_with_label.csv'''
lowercase__ = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = tmp_path / '''csv_with_int_list.csv'''
lowercase__ = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = Csv()
lowercase__ = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(lowerCamelCase_ , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(lowerCamelCase_ ) in record.message
for record in caplog.records )
@require_pil
def a ( lowerCamelCase_ ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read().splitlines()[1]
lowercase__ = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
lowercase__ = csv._generate_tables([[csv_file_with_image]] )
lowercase__ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
lowercase__ = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def a ( lowerCamelCase_ ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read().splitlines()[1:]
lowercase__ = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
lowercase__ = csv._generate_tables([[csv_file_with_label]] )
lowercase__ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
lowercase__ = pa_table.to_pydict()['''label''']
assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).straint(lowerCamelCase_ ) for label in labels]
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda lowerCamelCase_ : [int(lowerCamelCase_ ) for i in x.split()]} )
lowercase__ = csv._generate_tables([[csv_file_with_int_list]] )
lowercase__ = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
lowercase__ = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 718 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = args.log_outputs
lowercase__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowercase__ = load_metric('''wer''' )
lowercase__ = load_metric('''cer''' )
# compute metrics
lowercase__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowercase__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowercase__ = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCamelCase_ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCamelCase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowercase__ = F"""log_{dataset_id}_predictions.txt"""
lowercase__ = F"""log_{dataset_id}_targets.txt"""
with open(lowerCamelCase_ , '''w''' ) as p, open(lowerCamelCase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCamelCase_ , lowerCamelCase_ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCamelCase_ , with_indices=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase__ = re.sub(lowerCamelCase_ , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
lowercase__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowercase__ = ''' '''.join(text.split(lowerCamelCase_ ) )
return text
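# Worked example (annotation, not in the original): lowercasing plus the
# character filter maps "Hello, world." to "hello world"; the loop then
# collapses newlines and repeated spaces into single spaces.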
def a ( lowerCamelCase_ ):
'''simple docstring'''
# load dataset
lowercase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase_ )
    # for testing: only process the first 10 examples as a test
# dataset = dataset.select(range(10))
# load processor
lowercase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase__ = feature_extractor.sampling_rate
# resample audio
lowercase__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCamelCase_ ) )
# load eval pipeline
if args.device is None:
lowercase__ = 0 if torch.cuda.is_available() else -1
lowercase__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCamelCase_ ):
lowercase__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase__ = prediction['''text''']
lowercase__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowercase__ = dataset.map(lowerCamelCase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
| 671 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = tempfile.mkdtemp()
lowercase__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowercase__ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
lowercase__ = os.path.join(self.tmpdirname, lowerCamelCase )
with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
json.dump(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[str], **lowerCamelCase : List[str] ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : Any, **lowerCamelCase : int ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : Dict, **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : Dict ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
lowercase__ = [Image.fromarray(np.moveaxis(lowerCamelCase, 0, -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = AlignProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase )
lowercase__ = AlignProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer, lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor, lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
lowercase__ = self.get_image_processor(do_normalize=lowerCamelCase, padding_value=1.0 )
lowercase__ = AlignProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=lowerCamelCase, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(lowerCamelCase, return_tensors='''np''' )
lowercase__ = processor(images=lowerCamelCase, return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2 )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''lower newer'''
lowercase__ = processor(text=lowerCamelCase )
lowercase__ = tokenizer(lowerCamelCase, padding='''max_length''', max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''lower newer'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.batch_decode(lowerCamelCase )
lowercase__ = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''lower newer'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
| 719 |
from functools import reduce
A__ : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def a ( lowerCamelCase_ = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCamelCase_ , lowerCamelCase_ : str(int(lowerCamelCase_ ) * int(lowerCamelCase_ ) ) , n[i : i + 13] ) )
for i in range(len(lowerCamelCase_ ) - 12 ) )
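# Annotation (not in the original): the generator expression slides a 13-digit
# window over the 1000-digit string, reduce multiplies the digits of each
# window, and max keeps the largest product (23514624000 for this input).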
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = BertJapaneseTokenizer
lowercase__ = False
lowercase__ = True
def lowercase__ ( self : str ):
'''simple docstring'''
super().setUp()
lowercase__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = '''こんにちは、世界。 \nこんばんは、世界。'''
lowercase__ = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def lowercase__ ( self : Optional[int], lowerCamelCase : List[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.get_input_output_texts(lowerCamelCase )
lowercase__ = tokenizer.encode(lowerCamelCase, add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.decode(lowerCamelCase, clean_up_tokenization_spaces=lowerCamelCase )
return text, ids
def lowercase__ ( self : Any ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.tokenizer_class(self.vocab_file )
lowercase__ = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(lowerCamelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.tokenizer_class(self.vocab_file, word_tokenizer_type='''mecab''' )
self.assertIsNotNone(lowerCamelCase )
lowercase__ = '''こんにちは、世界。\nこんばんは、世界。'''
lowercase__ = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase__ = os.path.join(self.tmpdirname, '''tokenizer.bin''' )
with open(lowerCamelCase, '''wb''' ) as handle:
pickle.dump(lowerCamelCase, lowerCamelCase )
with open(lowerCamelCase, '''rb''' ) as handle:
lowercase__ = pickle.load(lowerCamelCase )
lowercase__ = tokenizer_new.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
try:
lowercase__ = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
try:
lowercase__ = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MecabTokenizer(do_lower_case=lowerCamelCase, mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
def lowercase__ ( self : Any ):
'''simple docstring'''
try:
lowercase__ = MecabTokenizer(
do_lower_case=lowerCamelCase, normalize_text=lowerCamelCase, mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = MecabTokenizer(normalize_text=lowerCamelCase, mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''], )
@require_sudachi
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.tokenizer_class(self.vocab_file, word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(lowerCamelCase )
lowercase__ = '''こんにちは、世界。\nこんばんは、世界。'''
lowercase__ = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase__ = os.path.join(self.tmpdirname, '''tokenizer.bin''' )
with open(lowerCamelCase, '''wb''' ) as handle:
pickle.dump(lowerCamelCase, lowerCamelCase )
with open(lowerCamelCase, '''rb''' ) as handle:
lowercase__ = pickle.load(lowerCamelCase )
lowercase__ = tokenizer_new.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
@require_sudachi
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''], )
@require_sudachi
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(sudachi_dict_type='''core''', sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ), ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(sudachi_dict_type='''core''', sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ), ['''外国人''', '''参政権'''] )
@require_sudachi
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(sudachi_dict_type='''core''', sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ), ['''外国人参政権'''] )
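# Per SudachiPy's documented behavior, split modes go from shortest to longest
# units: A yields short units, B middle units, and C the longest match, which
# is why the same compound tokenizes differently in the three tests above.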
@require_sudachi
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(do_lower_case=lowerCamelCase, sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''], )
@require_sudachi
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(normalize_text=lowerCamelCase, sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''], )
@require_sudachi
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = SudachiTokenizer(trim_whitespace=lowerCamelCase, sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''], )
@require_jumanpp
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.tokenizer_class(self.vocab_file, word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(lowerCamelCase )
lowercase__ = '''こんにちは、世界。\nこんばんは、世界。'''
lowercase__ = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ), [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase__ = os.path.join(self.tmpdirname, '''tokenizer.bin''' )
with open(lowerCamelCase, '''wb''' ) as handle:
pickle.dump(lowerCamelCase, lowerCamelCase )
with open(lowerCamelCase, '''rb''' ) as handle:
lowercase__ = pickle.load(lowerCamelCase )
lowercase__ = tokenizer_new.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase, lowerCamelCase )
@require_jumanpp
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], )
@require_jumanpp
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = JumanppTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], )
@require_jumanpp
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = JumanppTokenizer(normalize_text=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''], )
@require_jumanpp
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = JumanppTokenizer(trim_whitespace=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ), ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''], )
@require_jumanpp
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ), ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''], )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
lowercase__ = {}
for i, token in enumerate(lowerCamelCase ):
lowercase__ = i
lowercase__ = WordpieceTokenizer(vocab=lowerCamelCase, unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ), [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ), ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ), ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ), ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
lowercase__ = tokenizer.subword_tokenizer
lowercase__ = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(lowerCamelCase, ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
lowercase__ = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(lowerCamelCase, ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
lowercase__ = tokenizer.encode('''ありがとう。''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''どういたしまして。''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = BertJapaneseTokenizer
lowercase__ = False
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
lowercase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : str, **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type='''character''', **lowerCamelCase )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = '''こんにちは、世界。 \nこんばんは、世界。'''
lowercase__ = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def lowercase__ ( self : Any ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : int ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Dict ):
'''simple docstring'''
pass # TODO add if relevant
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.tokenizer_class(self.vocab_file, subword_tokenizer_type='''character''' )
lowercase__ = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
lowerCamelCase, ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowercase__ = {}
for i, token in enumerate(lowerCamelCase ):
lowercase__ = i
lowercase__ = CharacterTokenizer(vocab=lowerCamelCase, unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ), [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ), ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ), ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
lowercase__ = tokenizer.encode('''ありがとう。''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''どういたしまして。''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''cl-tohoku/bert-base-japanese'''
lowercase__ = AutoTokenizer.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase, lowerCamelCase )
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''', level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(lowerCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
lowercase__ = '''bert-base-cased'''
with self.assertLogs('''transformers''', level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
| 720 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = 1
@register_to_config
def __init__( self : Union[str, Any], lowerCamelCase : int = 2_000, lowerCamelCase : float = 0.15, lowerCamelCase : float = 0.01, lowerCamelCase : float = 1348.0, lowerCamelCase : float = 1E-5, lowerCamelCase : int = 1, ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ = sigma_max
# settable values
lowercase__ = None
self.set_sigmas(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
return sample
def lowercase__ ( self : Dict, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : Union[str, torch.device] = None ):
'''simple docstring'''
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowercase__ = torch.linspace(1, lowerCamelCase, lowerCamelCase, device=lowerCamelCase )
def lowercase__ ( self : str, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : float = None, lowerCamelCase : float = None ):
'''simple docstring'''
lowercase__ = sigma_min if sigma_min is not None else self.config.sigma_min
lowercase__ = sigma_max if sigma_max is not None else self.config.sigma_max
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCamelCase, lowerCamelCase )
lowercase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowercase__ = torch.exp(torch.linspace(math.log(lowerCamelCase ), math.log(lowerCamelCase ), lowerCamelCase ) )
lowercase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
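# Both schedules above follow the geometric interpolation
#     sigma(t) = sigma_min * (sigma_max / sigma_min) ** t,
# with `discrete_sigmas` holding the same values log-spaced over the timesteps.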
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : str ):
'''simple docstring'''
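# Returns sigma_{t-1} for each timestep, with sigma_{-1} defined as 0 for the
# very first step.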
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def lowercase__ ( self : Tuple, lowerCamelCase : torch.FloatTensor, lowerCamelCase : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowercase__ = timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowercase__ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be on the same device, so we move them to the sigmas' device (cpu, which also works with cuda by default)
lowercase__ = timesteps.to(self.discrete_sigmas.device )
lowercase__ = self.discrete_sigmas[timesteps].to(sample.device )
lowercase__ = self.get_adjacent_sigma(lowerCamelCase, lowerCamelCase ).to(sample.device )
lowercase__ = torch.zeros_like(lowerCamelCase )
lowercase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
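# Concretely, the predictor step assembled below evaluates to
#     x_{t-1} = x_t + (sigma_t**2 - sigma_{t-1}**2) * score(x_t) + sqrt(sigma_t**2 - sigma_{t-1}**2) * z,  z ~ N(0, I),
# where `diffusion` computed above is the square-root factor.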
lowercase__ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowercase__ = diffusion.unsqueeze(-1 )
lowercase__ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of the SDE
lowercase__ = randn_tensor(
sample.shape, layout=sample.layout, generator=lowerCamelCase, device=sample.device, dtype=sample.dtype )
lowercase__ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowercase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCamelCase, prev_sample_mean=lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper suggests "replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowercase__ = randn_tensor(sample.shape, layout=sample.layout, generator=lowerCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowercase__ = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
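# This is the corrector step size from Algorithm 4 of the score-SDE paper
# referenced in the header: eps = 2 * (snr * ||z|| / ||s||)**2, with s the
# score estimate (model_output) and z the sampled noise.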
lowercase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowercase__ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowercase__ = step_size.unsqueeze(-1 )
lowercase__ = sample + step_size * model_output
lowercase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
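# The two lines above form one annealed Langevin update:
#     x <- x + eps * s + sqrt(2 * eps) * z.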
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, ):
'''simple docstring'''
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowercase__ = timesteps.to(original_samples.device )
lowercase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowercase__ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCamelCase ) * sigmas[:, None, None, None]
)
lowercase__ = noise + original_samples
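# Forward (variance-exploding) perturbation: x_t = x_0 + sigma_t * z, i.e. the
# noise is scaled by sigma_t and simply added to the clean samples.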
return noisy_samples
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 671 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
A__ : int = None
A__ : Dict = logging.get_logger(__name__)
A__ : int = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
A__ : Optional[int] = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
A__ : Dict = {
'camembert-base': 5_12,
}
A__ : Any = '▁'
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["""input_ids""", """attention_mask"""]
lowercase__ = CamembertTokenizer
def __init__( self : Tuple, lowerCamelCase : str=None, lowerCamelCase : Optional[Any]=None, lowerCamelCase : int="<s>", lowerCamelCase : Union[str, Any]="</s>", lowerCamelCase : Any="</s>", lowerCamelCase : Optional[Any]="<s>", lowerCamelCase : int="<unk>", lowerCamelCase : Any="<pad>", lowerCamelCase : Optional[int]="<mask>", lowerCamelCase : Any=["<s>NOTUSED", "</s>NOTUSED"], **lowerCamelCase : str, ):
'''simple docstring'''
lowercase__ = AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else mask_token
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, bos_token=lowerCamelCase, eos_token=lowerCamelCase, sep_token=lowerCamelCase, cls_token=lowerCamelCase, unk_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token=lowerCamelCase, additional_special_tokens=lowerCamelCase, **lowerCamelCase, )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def lowercase__ ( self : Any, lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
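# i.e. a RoBERTa-style pair is wrapped as `<s> A </s></s> B </s>`, hence the
# doubled separator between the two segments.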
def lowercase__ ( self : Optional[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : Tuple, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
copyfile(self.vocab_file, lowerCamelCase )
return (out_vocab_file,)
| 721 |
from collections import defaultdict
from math import gcd
def a ( lowerCamelCase_ = 150_0000 ):
'''simple docstring'''
lowercase__ = defaultdict(lowerCamelCase_ )
lowercase__ = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , lowerCamelCase_ , 2 ):
if gcd(lowerCamelCase_ , lowerCamelCase_ ) > 1:
continue
lowercase__ = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(lowerCamelCase_ , limit + 1 , lowerCamelCase_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
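# Euclid's formula: coprime m > n of opposite parity generate every primitive
# Pythagorean triple a = m**2 - n**2, b = 2*m*n, c = m**2 + n**2, with
# perimeter 2*m*(m + n); the loop tallies each perimeter (and its multiples)
# and the answer counts those reached exactly once.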
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 0 |
from manim import *
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = Rectangle(height=0.5, width=0.5 )
lowercase__ = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
lowercase__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
lowercase__ = VGroup(lowerCamelCase, lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
lowercase__ = Text('''CPU''', font_size=24 )
lowercase__ = Group(lowerCamelCase, lowerCamelCase ).arrange(lowerCamelCase, buff=0.5, aligned_edge=lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase )
lowercase__ = [mem.copy() for i in range(1 )]
lowercase__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
lowercase__ = Text('''GPU''', font_size=24 )
lowercase__ = Group(lowerCamelCase, lowerCamelCase ).arrange(lowerCamelCase, buff=0.5, aligned_edge=lowerCamelCase )
gpu.align_to(lowerCamelCase, lowerCamelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCamelCase )
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = VGroup(*lowerCamelCase ).arrange(lowerCamelCase, buff=0 )
lowercase__ = Text('''Model''', font_size=24 )
lowercase__ = Group(lowerCamelCase, lowerCamelCase ).arrange(lowerCamelCase, buff=0.5, aligned_edge=lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCamelCase, run_time=1 ), Create(lowerCamelCase, run_time=1 ), Create(lowerCamelCase, run_time=1 ), )
lowercase__ = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""", font_size=24, )
lowercase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase__ = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=18, )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase, run_time=2.5 ), Write(lowerCamelCase ), Write(lowerCamelCase ) )
self.add(lowerCamelCase )
lowercase__ = []
lowercase__ = []
lowercase__ = []
for i, rect in enumerate(lowerCamelCase ):
lowercase__ = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase, opacity=0.7 )
cpu_target.move_to(lowerCamelCase )
cpu_target.generate_target()
lowercase__ = 0.46 / 4
lowercase__ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=lowerCamelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target, direction=lowerCamelCase, buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target, direction=lowerCamelCase, buff=0.0 )
cpu_targs.append(lowerCamelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase ) )
second_animations.append(MoveToTarget(lowerCamelCase, run_time=1.5 ) )
self.play(*lowerCamelCase )
self.play(*lowerCamelCase )
self.wait()
| 700 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
A__ : Dict = logging.get_logger(__name__)
A__ : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A__ : Optional[int] = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
A__ : List[str] = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
A__ : Optional[int] = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BertTokenizer
def __init__( self : Any, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Any=None, lowerCamelCase : Tuple=True, lowerCamelCase : Dict="[UNK]", lowerCamelCase : Any="[SEP]", lowerCamelCase : List[Any]="[PAD]", lowerCamelCase : Optional[Any]="[CLS]", lowerCamelCase : Dict="[MASK]", lowerCamelCase : List[Any]=True, lowerCamelCase : Tuple=None, **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, tokenize_chinese_chars=lowerCamelCase, strip_accents=lowerCamelCase, **lowerCamelCase, )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''', lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''', lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''', lowerCamelCase ) != tokenize_chinese_chars
):
lowercase__ = getattr(lowerCamelCase, normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**lowerCamelCase )
lowercase__ = do_lower_case
def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : Dict=None ):
'''simple docstring'''
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : List[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(lowerCamelCase, name=lowerCamelCase )
return tuple(lowerCamelCase )
| 671 | 0 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = AudioLDMPipeline
lowercase__ = TEXT_TO_AUDIO_PARAMS
lowercase__ = TEXT_TO_AUDIO_BATCH_PARAMS
lowercase__ = frozenset(
[
"""num_inference_steps""",
"""num_waveforms_per_prompt""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=(32, 64), class_embed_type='''simple_projection''', projection_class_embeddings_input_dim=32, class_embeddings_concat=lowerCamelCase, )
lowercase__ = DDIMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=lowerCamelCase, set_alpha_to_one=lowerCamelCase, )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
torch.manual_seed(0 )
lowercase__ = ClapTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, projection_dim=32, )
lowercase__ = ClapTextModelWithProjection(lowerCamelCase )
lowercase__ = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''', model_max_length=77 )
lowercase__ = SpeechTaHifiGanConfig(
model_in_dim=8, sampling_rate=16_000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=lowerCamelCase, )
lowercase__ = SpeechTaHifiGan(lowerCamelCase )
lowercase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''vocoder''': vocoder,
}
return components
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : Optional[Any]=0 ):
'''simple docstring'''
if str(lowerCamelCase ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(lowerCamelCase )
else:
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowercase__ = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
}
return inputs
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = audioldm_pipe(**lowerCamelCase )
lowercase__ = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) == 256
lowercase__ = audio[:10]
lowercase__ = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = 3 * [inputs['''prompt''']]
# forward
lowercase__ = audioldm_pipe(**lowerCamelCase )
lowercase__ = output.audios[0]
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = 3 * [inputs.pop('''prompt''' )]
lowercase__ = audioldm_pipe.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=audioldm_pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = text_inputs['''input_ids'''].to(lowerCamelCase )
lowercase__ = audioldm_pipe.text_encoder(
lowerCamelCase, )
lowercase__ = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowercase__ = F.normalize(lowerCamelCase, dim=-1 )
lowercase__ = prompt_embeds
# forward
lowercase__ = audioldm_pipe(**lowerCamelCase )
lowercase__ = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = 3 * ['''this is a negative prompt''']
lowercase__ = negative_prompt
lowercase__ = 3 * [inputs['''prompt''']]
# forward
lowercase__ = audioldm_pipe(**lowerCamelCase )
lowercase__ = output.audios[0]
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = 3 * [inputs.pop('''prompt''' )]
lowercase__ = []
for p in [prompt, negative_prompt]:
lowercase__ = audioldm_pipe.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=audioldm_pipe.tokenizer.model_max_length, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = text_inputs['''input_ids'''].to(lowerCamelCase )
lowercase__ = audioldm_pipe.text_encoder(
lowerCamelCase, )
lowercase__ = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowercase__ = F.normalize(lowerCamelCase, dim=-1 )
embeds.append(lowerCamelCase )
lowercase__ , lowercase__ = embeds
# forward
lowercase__ = audioldm_pipe(**lowerCamelCase )
lowercase__ = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase )
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = '''egg cracking'''
lowercase__ = audioldm_pipe(**lowerCamelCase, negative_prompt=lowerCamelCase )
lowercase__ = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) == 256
lowercase__ = audio[:10]
lowercase__ = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase )
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
lowercase__ = audioldm_pipe(lowerCamelCase, num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
lowercase__ = 2
lowercase__ = audioldm_pipe([prompt] * batch_size, num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
lowercase__ = 2
lowercase__ = audioldm_pipe(lowerCamelCase, num_inference_steps=2, num_waveforms_per_prompt=lowerCamelCase ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
lowercase__ = 2
lowercase__ = audioldm_pipe(
[prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=lowerCamelCase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = audioldm_pipe.vocoder.config.sampling_rate
lowercase__ = self.get_dummy_inputs(lowerCamelCase )
lowercase__ = audioldm_pipe(audio_length_in_s=0.016, **lowerCamelCase )
lowercase__ = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) / vocoder_sampling_rate == 0.016
lowercase__ = audioldm_pipe(audio_length_in_s=0.032, **lowerCamelCase )
lowercase__ = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) / vocoder_sampling_rate == 0.032
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = AudioLDMPipeline(**lowerCamelCase )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = ['''hey''']
lowercase__ = audioldm_pipe(lowerCamelCase, num_inference_steps=1 )
lowercase__ = output.audios.shape
assert audio_shape == (1, 256)
lowercase__ = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
lowercase__ = SpeechTaHifiGan(lowerCamelCase ).to(lowerCamelCase )
lowercase__ = audioldm_pipe(lowerCamelCase, num_inference_steps=1 )
lowercase__ = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def lowercase__ ( self : int ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase )
def lowercase__ ( self : Dict ):
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowerCamelCase )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase )
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[Any]="cpu", lowerCamelCase : List[Any]=torch.floataa, lowerCamelCase : List[str]=0 ):
'''simple docstring'''
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowercase__ = np.random.RandomState(lowerCamelCase ).standard_normal((1, 8, 128, 16) )
lowercase__ = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase, dtype=lowerCamelCase )
lowercase__ = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 2.5,
}
return inputs
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_inputs(lowerCamelCase )
lowercase__ = 25
lowercase__ = audioldm_pipe(**lowerCamelCase ).audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) == 81_920
lowercase__ = audio[77_230:77_240]
lowercase__ = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
lowercase__ = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
lowercase__ = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
lowercase__ = audioldm_pipe.to(lowerCamelCase )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = self.get_inputs(lowerCamelCase )
lowercase__ = audioldm_pipe(**lowerCamelCase ).audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase ) == 81_920
lowercase__ = audio[27_780:27_790]
lowercase__ = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
lowercase__ = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 701 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A__ : Any = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
A__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 | 0 |
import argparse
import datetime
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = {
'''0''': '''Sunday''',
'''1''': '''Monday''',
'''2''': '''Tuesday''',
'''3''': '''Wednesday''',
'''4''': '''Thursday''',
'''5''': '''Friday''',
'''6''': '''Saturday''',
}
lowercase__ = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if len(lowerCamelCase_ ) != 10:
raise ValueError('''Must be 10 characters long''' )
# Get month
lowercase__ = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError('''Month must be between 1 - 12''' )
lowercase__ = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('''Date separator must be \'-\' or \'/\'''' )
# Get day
lowercase__ = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError('''Date must be between 1 - 31''' )
# Get second separator
lowercase__ = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('''Date separator must be \'-\' or \'/\'''' )
# Get year
lowercase__ = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
'''Year out of range. There has to be some sort of limit...right?''' )
# Get datetime obj for validation
lowercase__ = datetime.date(int(lowerCamelCase_ ) , int(lowerCamelCase_ ) , int(lowerCamelCase_ ) )
# Start math
if m <= 2:
lowercase__ = y - 1
lowercase__ = m + 12
# maths var
lowercase__ = int(str(lowerCamelCase_ )[:2] )
lowercase__ = int(str(lowerCamelCase_ )[2:] )
lowercase__ = int(2.6 * m - 5.39 )
lowercase__ = int(c / 4 )
lowercase__ = int(k / 4 )
lowercase__ = int(d + k )
lowercase__ = int(t + u + v + x )
lowercase__ = int(z - (2 * c) )
lowercase__ = round(w % 7 )
# End math
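# In one line, the Zeller congruence computed above is
#     f = (d + k + floor(2.6 * m - 5.39) + floor(k / 4) + floor(c / 4) - 2 * c) mod 7,
# with c the century and k the year within the century (after the Jan/Feb shift).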
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('''The date was evaluated incorrectly. Contact developer.''' )
# Response
lowercase__ = F"""Your date {date_input}, is a {days[str(lowerCamelCase_ )]}!"""
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
A__ : str = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
A__ : Any = parser.parse_args()
zeller(args.date_input)
| 702 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
A__ : Dict = 50_00_00
A__ , A__ : str = os.path.split(__file__)
A__ : Optional[Any] = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.map(**lowerCamelCase_ )
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.filter(**lowerCamelCase_ )
def a ( ):
'''simple docstring'''
lowercase__ = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
lowercase__ = generate_example_dataset(
os.path.join(lowerCamelCase_ , '''dataset.arrow''' ) , lowerCamelCase_ , num_examples=lowerCamelCase_ )
lowercase__ = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowerCamelCase_ )
def tokenize(lowerCamelCase_ ):
return tokenizer(examples['''text'''] )
lowercase__ = map(lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''numpy''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''pandas''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = filter(lowerCamelCase_ )
# Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowerCamelCase_ , '''wb''' ) as f:
f.write(json.dumps(lowerCamelCase_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 671 | 0 |
# Lint as: python3
import itertools
import os
import re
A__ : List[str] = re.compile(r'([A-Z]+)([A-Z][a-z])')
A__ : Optional[Any] = re.compile(r'([a-z\d])([A-Z])')
A__ : int = re.compile(r'(?<!_)_(?!_)')
A__ : int = re.compile(r'(_{2,})')
A__ : List[str] = r'^\w+(\.\w+)*$'
A__ : Any = r'<>:/\|?*'
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _uppercase_uppercase_re.sub(r'''\1_\2''' , lowerCamelCase_ )
lowercase__ = _lowercase_uppercase_re.sub(r'''\1_\2''' , lowerCamelCase_ )
return name.lower()
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _single_underscore_re.split(lowerCamelCase_ )
lowercase__ = [_multiple_underscores_re.split(lowerCamelCase_ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowerCamelCase_ ) if n != '''''' )
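# Examples, derived from the regexes above:
#     camelcase_to_snakecase("MyDataset")  -> "my_dataset"
#     snakecase_to_camelcase("my_dataset") -> "MyDataset"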
def a ( lowerCamelCase_ ):
'''simple docstring'''
if os.path.basename(lowerCamelCase_ ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
return camelcase_to_snakecase(lowerCamelCase_ )
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if os.path.basename(lowerCamelCase_ ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
if not re.match(_split_re , lowerCamelCase_ ):
raise ValueError(F"""Split name should match '{_split_re}' but got '{split}'.""" )
return F"""{filename_prefix_for_name(lowerCamelCase_ )}-{split}"""
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = filename_prefix_for_split(lowerCamelCase_ , lowerCamelCase_ )
if filetype_suffix:
prefix += F""".{filetype_suffix}"""
lowercase__ = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
return F"""{filepath}*"""
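# e.g. name="my_dataset", split="train", filetype_suffix="arrow" yields the
# glob "<data_dir>/my_dataset-train.arrow*".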
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = filename_prefix_for_split(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if shard_lengths:
lowercase__ = len(lowerCamelCase_ )
lowercase__ = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(lowerCamelCase_ )]
if filetype_suffix:
lowercase__ = [filename + F""".{filetype_suffix}""" for filename in filenames]
return filenames
else:
lowercase__ = prefix
if filetype_suffix:
filename += F""".{filetype_suffix}"""
return [filename]
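# With five shards this yields "my_dataset-train-00000-of-00005.arrow" through
# "my_dataset-train-00004-of-00005.arrow".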
| 703 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : str = "", lowerCamelCase : bool = False ):
'''simple docstring'''
# Mapping from the first character of each child's prefix to that child node
lowercase__ = {}
# A node will be a leaf if the tree contains its word
lowercase__ = is_leaf
lowercase__ = prefix
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = 0
for q, w in zip(self.prefix, lowerCamelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
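# `match` returns (common, remaining_prefix, remaining_word): the shared
# leading substring, the rest of this node's prefix, and the rest of the word.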
def lowercase__ ( self : Optional[int], lowerCamelCase : list[str] ):
'''simple docstring'''
for word in words:
self.insert(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
# Case 1: The word is exactly the prefix of the node
# Solution: We set the current node as a leaf
if self.prefix == word:
lowercase__ = True
# Case 2: The node has no edge starting with the word's first character
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowercase__ = RadixNode(prefix=lowerCamelCase, is_leaf=lowerCamelCase )
else:
lowercase__ = self.nodes[word[0]]
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCamelCase )
# Case 4: The node's prefix only partially matches the word
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowercase__ = remaining_prefix
lowercase__ = self.nodes[matching_string[0]]
lowercase__ = RadixNode(lowerCamelCase, lowerCamelCase )
lowercase__ = aux_node
if remaining_word == "":
lowercase__ = True
else:
self.nodes[matching_string[0]].insert(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.nodes.get(word[0], lowerCamelCase )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
            # If there is remaining prefix, the word can't be in the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCamelCase )
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.nodes.get(word[0], lowerCamelCase )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
            # If there is remaining prefix, the word can't be in the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCamelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
                    # We delete the node if no edges go out of it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowercase__ = list(self.nodes.values() )[0]
lowercase__ = merging_node.is_leaf
self.prefix += merging_node.prefix
lowercase__ = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowercase__ = False
# If there is 1 edge, we merge it with its child
else:
lowercase__ = list(incoming_node.nodes.values() )[0]
lowercase__ = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowercase__ = merging_node.nodes
return True
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int = 0 ):
'''simple docstring'''
if self.prefix != "":
print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def a ( ):
'''simple docstring'''
lowercase__ = '''banana bananas bandana band apple all beast'''.split()
lowercase__ = RadixNode()
root.insert_many(lowerCamelCase_ )
    assert all(root.find(word ) for word in words )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def a ( ):
'''simple docstring'''
assert test_trie()
def a ( ):
'''simple docstring'''
lowercase__ = RadixNode()
lowercase__ = '''banana bananas bandanas bandana band apple all beast'''.split()
root.insert_many(lowerCamelCase_ )
print('''Words:''' , lowerCamelCase_ )
print('''Tree:''' )
root.print_tree()
if __name__ == "__main__":
main()
| 671 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""image_processor""", """tokenizer"""]
lowercase__ = """BridgeTowerImageProcessor"""
lowercase__ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self : List[str], lowerCamelCase : str, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
super().__init__(lowerCamelCase, lowerCamelCase )
def __call__( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, lowerCamelCase : bool = True, lowerCamelCase : Union[bool, str, PaddingStrategy] = False, lowerCamelCase : Union[bool, str, TruncationStrategy] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : int = 0, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = False, lowerCamelCase : bool = True, lowerCamelCase : Optional[Union[str, TensorType]] = None, **lowerCamelCase : str, ):
'''simple docstring'''
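        # Tokenize the text with the wrapped tokenizer and preprocess the images
        # with the BridgeTower image processor, then merge both into one encoding.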
lowercase__ = self.tokenizer(
text=lowerCamelCase, add_special_tokens=lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, stride=lowerCamelCase, pad_to_multiple_of=lowerCamelCase, return_token_type_ids=lowerCamelCase, return_attention_mask=lowerCamelCase, return_overflowing_tokens=lowerCamelCase, return_special_tokens_mask=lowerCamelCase, return_offsets_mapping=lowerCamelCase, return_length=lowerCamelCase, verbose=lowerCamelCase, return_tensors=lowerCamelCase, **lowerCamelCase, )
# add pixel_values + pixel_mask
lowercase__ = self.image_processor(
lowerCamelCase, return_tensors=lowerCamelCase, do_normalize=lowerCamelCase, do_center_crop=lowerCamelCase, **lowerCamelCase )
encoding.update(lowerCamelCase )
return encoding
def lowercase__ ( self : str, *lowerCamelCase : int, **lowerCamelCase : List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase, **lowerCamelCase )
def lowercase__ ( self : List[Any], *lowerCamelCase : Optional[int], **lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase, **lowerCamelCase )
@property
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self.tokenizer.model_input_names
lowercase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 704 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = ViTImageProcessor if is_vision_available() else None
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = (3, 32, 128)
lowercase__ = tempfile.mkdtemp()
# fmt: off
lowercase__ = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
lowercase__ = dict(zip(lowerCamelCase, range(len(lowerCamelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase ) + '''\n''' )
lowercase__ = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
lowercase__ = os.path.join(self.tmpdirname, lowerCamelCase )
with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
json.dump(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : int, **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : str, **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )
lowercase__ = Image.fromarray(np.moveaxis(lowerCamelCase, 0, -1 ) )
return image_input
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
lowercase__ = self.get_image_processor(do_normalize=lowerCamelCase, padding_value=1.0 )
lowercase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=lowerCamelCase, padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(lowerCamelCase, return_tensors='''np''' )
lowercase__ = processor(images=lowerCamelCase, return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = processor(text=lowerCamelCase )
lowercase__ = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.char_decode(lowerCamelCase )
lowercase__ = tokenizer.batch_decode(lowerCamelCase )
lowercase__ = [seq.replace(''' ''', '''''' ) for seq in decoded_tok]
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = None
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = torch.randn(1, 27, 38 )
lowercase__ = torch.randn(1, 27, 50_257 )
lowercase__ = torch.randn(1, 27, 30_522 )
lowercase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ), ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 671 | 0 |
import math
def a ( lowerCamelCase_ ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a ( lowerCamelCase_ = 0.1 ):
'''simple docstring'''
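    # Count primes on the diagonals of a square spiral: each iteration tests the
    # three non-square corner values of the next ring (j * j + j + 1 with a step
    # of j + 1) and stops once the fraction of primes among the 2 * j - 1
    # diagonal values drops below `ratio` (cf. Project Euler 58).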
lowercase__ = 3
lowercase__ = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 |
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
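    # Fast modular exponentiation by repeated squaring: computes
    # (base ** exponent) % modulo_value in O(log exponent) steps,
    # e.g. _modexpt(2, 10, 1000) == 24 because 2 ** 10 == 1024.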
if exponent == 1:
return base
if exponent % 2 == 0:
lowercase__ = _modexpt(lowerCamelCase_ , exponent // 2 , lowerCamelCase_ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowerCamelCase_ , exponent - 1 , lowerCamelCase_ )) % modulo_value
def a ( lowerCamelCase_ = 1777 , lowerCamelCase_ = 1855 , lowerCamelCase_ = 8 ):
'''simple docstring'''
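    # Hyperexponentiation (tetration): repeatedly raises the base to the running
    # result modulo 10 ** digits, so only the trailing digits are kept
    # (cf. Project Euler 188: the last 8 digits of 1777 tetrated to height 1855).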
lowercase__ = base
for _ in range(1 , lowerCamelCase_ ):
lowercase__ = _modexpt(lowerCamelCase_ , lowerCamelCase_ , 10**digits )
return result
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 0 |
def a ( lowerCamelCase_ ):
'''simple docstring'''
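    # Interpret the column title as a base-26 numeral with 'A' = 1 ... 'Z' = 26,
    # e.g. 'A' -> 1, 'Z' -> 26, 'AB' -> 28.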
assert column_title.isupper()
lowercase__ = 0
lowercase__ = len(lowerCamelCase_ ) - 1
lowercase__ = 0
while index >= 0:
lowercase__ = (ord(column_title[index] ) - 64) * pow(26 , lowerCamelCase_ )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 706 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
A__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : WhisperForConditionalGeneration, lowerCamelCase : WhisperProcessor, lowerCamelCase : AutoencoderKL, lowerCamelCase : CLIPTextModel, lowerCamelCase : CLIPTokenizer, lowerCamelCase : UNetaDConditionModel, lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], lowerCamelCase : StableDiffusionSafetyChecker, lowerCamelCase : CLIPImageProcessor, ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=lowerCamelCase, speech_processor=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, unet=lowerCamelCase, scheduler=lowerCamelCase, feature_extractor=lowerCamelCase, )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
lowercase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase )
@torch.no_grad()
def __call__( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[Any]=16_000, lowerCamelCase : int = 512, lowerCamelCase : int = 512, lowerCamelCase : int = 50, lowerCamelCase : float = 7.5, lowerCamelCase : Optional[Union[str, List[str]]] = None, lowerCamelCase : Optional[int] = 1, lowerCamelCase : float = 0.0, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : Optional[torch.FloatTensor] = None, lowerCamelCase : Optional[str] = "pil", lowerCamelCase : bool = True, lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, lowerCamelCase : int = 1, **lowerCamelCase : Optional[Any], ):
'''simple docstring'''
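        # Transcribe the input audio with Whisper, then run the standard Stable
        # Diffusion text-to-image loop using the transcription as the prompt.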
lowercase__ = self.speech_processor.feature_extractor(
lowerCamelCase, return_tensors='''pt''', sampling_rate=lowerCamelCase ).input_features.to(self.device )
lowercase__ = self.speech_model.generate(lowerCamelCase, max_length=480_000 )
lowercase__ = self.speech_processor.tokenizer.batch_decode(lowerCamelCase, skip_special_tokens=lowerCamelCase, normalize=lowerCamelCase )[
0
]
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = 1
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = len(lowerCamelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase, lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(lowerCamelCase )}.""" )
# get prompt text embeddings
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt, lowerCamelCase, -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [''''''] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !="""
F""" {type(lowerCamelCase )}.""" )
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=lowerCamelCase, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt, lowerCamelCase, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='''cpu''', dtype=lowerCamelCase ).to(
self.device )
else:
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase )
# predict the noise residual
lowercase__ = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = 1 / 0.18215 * latents
lowercase__ = self.vae.decode(lowerCamelCase ).sample
lowercase__ = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase )
| 671 | 0 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = BertConfig.from_json_file(lowerCamelCase_ )
print(F"""Building PyTorch model from configuration: {config}""" )
lowercase__ = BertForPreTraining(lowerCamelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_bert(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , lowerCamelCase_ )
if __name__ == "__main__":
A__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
A__ : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 707 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : str, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = [[] for _ in range(lowerCamelCase )]
lowercase__ = size
def __getitem__( self : Optional[Any], lowerCamelCase : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return self._size
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(lowerCamelCase, lowerCamelCase ) )
def lowercase__ ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
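        # 0-1 BFS: a deque-based single-source shortest path search. Vertices
        # reached via a 0-weight edge are pushed to the front of the deque and
        # those reached via a 1-weight edge to the back, giving O(V + E) time.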
lowercase__ = deque([start_vertex] )
lowercase__ = [None] * self.size
lowercase__ = 0
while queue:
lowercase__ = queue.popleft()
lowercase__ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowercase__ = current_distance + edge.weight
lowercase__ = distances[edge.destination_vertex]
if (
isinstance(lowerCamelCase, lowerCamelCase )
and new_distance >= dest_vertex_distance
):
continue
lowercase__ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 0 |
from math import ceil
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
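    # Validate that the device_map assigns every attention block index to exactly
    # one device: no duplicates, no missing blocks, and no extra blocks beyond
    # what the model actually has.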
lowercase__ = list(range(0 , lowerCamelCase_ ) )
lowercase__ = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
lowercase__ = []
for i in device_map_blocks:
if device_map_blocks.count(lowerCamelCase_ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(lowerCamelCase_ )
# Missing blocks
lowercase__ = [i for i in blocks if i not in device_map_blocks]
lowercase__ = [i for i in device_map_blocks if i not in blocks]
if len(lowerCamelCase_ ) != 0:
raise ValueError(
'''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'''
''' These attention blocks were specified more than once: ''' + str(lowerCamelCase_ ) )
if len(lowerCamelCase_ ) != 0:
raise ValueError(
'''There are attention blocks for this model that are not specified in the device_map. Add these attention '''
'''blocks to a device on the device_map: ''' + str(lowerCamelCase_ ) )
if len(lowerCamelCase_ ) != 0:
raise ValueError(
'''The device_map contains more attention blocks than this model has. Remove these from the device_map:'''
+ str(lowerCamelCase_ ) )
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
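    # Evenly partition the layer indices into contiguous blocks, one per device,
    # and return {device: [layer indices]}; the last block may be shorter when
    # the number of layers is not divisible by the number of devices.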
lowercase__ = list(range(lowerCamelCase_ ) )
lowercase__ = int(ceil(n_layers / len(lowerCamelCase_ ) ) )
lowercase__ = [layers[i : i + n_blocks] for i in range(0 , lowerCamelCase_ , lowerCamelCase_ )]
return dict(zip(lowerCamelCase_ , lowerCamelCase_ ) ) | 708 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
        # the input is a comma-separated string, so split it into a list of values
lowercase__ = arr.split(''',''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
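        # Kadane's algorithm: track the best subarray sum ending at each index
        # and the best sum seen so far; the answer is the maximum sum over all
        # contiguous subarrays.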
lowercase__ = [int(self.array[0] )] * len(self.array )
lowercase__ = [int(self.array[0] )] * len(self.array )
for i in range(1, len(self.array ) ):
lowercase__ = max(
int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
lowercase__ = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
A__ : Dict = input('please input some numbers:')
A__ : Union[str, Any] = SubArray(whole_array)
A__ : int = array.solve_sub_array()
print(('the results is:', re))
| 671 | 0 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = args.log_outputs
lowercase__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowercase__ = load_metric('''wer''' )
lowercase__ = load_metric('''cer''' )
# compute metrics
lowercase__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowercase__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowercase__ = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCamelCase_ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCamelCase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowercase__ = F"""log_{dataset_id}_predictions.txt"""
lowercase__ = F"""log_{dataset_id}_targets.txt"""
with open(lowerCamelCase_ , '''w''' ) as p, open(lowerCamelCase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCamelCase_ , lowerCamelCase_ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCamelCase_ , with_indices=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase__ = re.sub(lowerCamelCase_ , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
lowercase__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowercase__ = ''' '''.join(text.split(lowerCamelCase_ ) )
return text
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
lowercase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase__ = feature_extractor.sampling_rate
# resample audio
lowercase__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCamelCase_ ) )
# load eval pipeline
if args.device is None:
lowercase__ = 0 if torch.cuda.is_available() else -1
lowercase__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCamelCase_ ):
lowercase__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase__ = prediction['''text''']
lowercase__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowercase__ = dataset.map(lowerCamelCase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
| 709 |
from itertools import count
def a ( lowerCamelCase_ = 50 ):
'''simple docstring'''
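    # Counting block combinations (cf. Project Euler 115): fill_count_functions[n]
    # counts the ways to fill a row of length n with blocks of at least
    # min_block_length, separated by at least one empty cell; return the first
    # row length where the count exceeds one million.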
lowercase__ = [1] * min_block_length
for n in count(lowerCamelCase_ ):
fill_count_functions.append(1 )
for block_length in range(lowerCamelCase_ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 100_0000:
break
return n
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 0 |
import argparse
import os
import re
A__ : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
A__ : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
A__ : List[str] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A__ : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
A__ : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A__ : Tuple = re.compile(r'\[([^\]]+)\]')
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _re_indent.search(lowerCamelCase_ )
return "" if search is None else search.groups()[0]
def a ( lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase_ ):
index += 1
lowercase__ = ['''\n'''.join(lines[:index] )]
else:
lowercase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ = [lines[index]]
index += 1
while index < len(lowerCamelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCamelCase_ ) )
if index < len(lowerCamelCase_ ) - 1:
lowercase__ = [lines[index + 1]]
index += 1
else:
lowercase__ = []
else:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
lowercase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def a ( lowerCamelCase_ ):
'''simple docstring'''
def _inner(lowerCamelCase_ ):
return key(lowerCamelCase_ ).lower().replace('''_''' , '''''' )
return _inner
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(lowerCamelCase_ ):
return x
if key is None:
lowercase__ = noop
# Constants are all uppercase, they go first.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ )[0].isupper() and not key(lowerCamelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ = [obj for obj in objects if not key(lowerCamelCase_ )[0].isupper()]
lowercase__ = ignore_underscore(lowerCamelCase_ )
return sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
    # This inner function sorts the imports between [ ].
def _replace(lowerCamelCase_ ):
lowercase__ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] ) + "]"
lowercase__ = import_statement.split('''\n''' )
if len(lowerCamelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ = 2 if lines[1].strip() == '''[''' else 1
lowercase__ = [(i, _re_strip_line.search(lowerCamelCase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowercase__ = sort_objects(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )
lowercase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
lowercase__ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] )
return "\n".join(lowerCamelCase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase__ = _re_bracket_content.sub(_replace , lowerCamelCase_ )
return import_statement
def a ( lowerCamelCase_ , lowerCamelCase_=True ):
'''simple docstring'''
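    # Sort the `_import_structure` definitions of an __init__.py in place:
    # reorder the per-key blocks alphabetically and sort the object names inside
    # each import statement.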
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase__ = split_code_in_indented_blocks(
lowerCamelCase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase__ = main_blocks[block_idx]
lowercase__ = block.split('''\n''' )
# Get to the start of the imports.
lowercase__ = 0
while line_idx < len(lowerCamelCase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase__ = len(lowerCamelCase_ )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase__ = '''\n'''.join(block_lines[line_idx:-1] )
lowercase__ = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowercase__ = split_code_in_indented_blocks(lowerCamelCase_ , indent_level=lowerCamelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase__ = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase__ = [(pattern.search(lowerCamelCase_ ).groups()[0] if pattern.search(lowerCamelCase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase__ = [(i, key) for i, key in enumerate(lowerCamelCase_ ) if key is not None]
lowercase__ = [x[0] for x in sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowercase__ = 0
lowercase__ = []
for i in range(len(lowerCamelCase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowercase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCamelCase_ )
count += 1
# And we put our main block back together with its first and last line.
lowercase__ = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCamelCase_ ) )
def a ( lowerCamelCase_=True ):
'''simple docstring'''
lowercase__ = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
lowercase__ = sort_imports(os.path.join(lowerCamelCase_ , '''__init__.py''' ) , check_only=lowerCamelCase_ )
if result:
lowercase__ = [os.path.join(lowerCamelCase_ , '''__init__.py''' )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowerCamelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A__ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 710 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A__ : Tuple = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[int], lowerCamelCase : int=64, lowerCamelCase : Union[str, Any]=48_000, lowerCamelCase : str=480, lowerCamelCase : Tuple=10, lowerCamelCase : List[Any]=1_024, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[Any]=False, lowerCamelCase : float = 0, lowerCamelCase : float = 14_000, lowerCamelCase : int = None, lowerCamelCase : str = "fusion", lowerCamelCase : str = "repeatpad", **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase, sampling_rate=lowerCamelCase, padding_value=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
lowercase__ = top_db
lowercase__ = truncation
lowercase__ = padding
lowercase__ = fft_window_size
lowercase__ = (fft_window_size >> 1) + 1
lowercase__ = hop_length
lowercase__ = max_length_s
lowercase__ = max_length_s * sampling_rate
lowercase__ = sampling_rate
lowercase__ = frequency_min
lowercase__ = frequency_max
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm=lowerCamelCase, mel_scale='''htk''', )
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm='''slaney''', mel_scale='''slaney''', )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase__ ( self : Optional[int], lowerCamelCase : np.array, lowerCamelCase : Optional[np.array] = None ):
'''simple docstring'''
lowercase__ = spectrogram(
lowerCamelCase, window_function(self.fft_window_size, '''hann''' ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=lowerCamelCase, log_mel='''dB''', )
return log_mel_spectrogram.T
def lowercase__ ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
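        # Build the 4-channel "fusion" input: take one random chunk each from the
        # front, middle and back thirds of the mel spectrogram, plus a bilinearly
        # downsampled copy of the full spectrogram, and stack them on a new axis.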
lowercase__ = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
# randomly choose index for each part
lowercase__ = np.random.choice(ranges[0] )
lowercase__ = np.random.choice(ranges[1] )
lowercase__ = np.random.choice(ranges[2] )
lowercase__ = mel[idx_front : idx_front + chunk_frames, :]
lowercase__ = mel[idx_middle : idx_middle + chunk_frames, :]
lowercase__ = mel[idx_back : idx_back + chunk_frames, :]
lowercase__ = torch.tensor(mel[None, None, :] )
lowercase__ = torch.nn.functional.interpolate(
lowerCamelCase, size=[chunk_frames, 64], mode='''bilinear''', align_corners=lowerCamelCase )
lowercase__ = mel_shrink[0][0].numpy()
lowercase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
def lowercase__ ( self : List[str], lowerCamelCase : np.array, lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
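        # Waveforms longer than max_length are truncated ("rand_trunc" takes a
        # random crop, "fusion" stacks multiple chunks); shorter ones are padded
        # ("repeat"/"repeatpad" tile the audio before zero-padding), and the
        # log-mel features are extracted from the result.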
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase__ = len(lowerCamelCase ) - max_length
lowercase__ = np.random.randint(0, overflow + 1 )
lowercase__ = waveform[idx : idx + max_length]
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowercase__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase__ = np.stack([mel, mel, mel, mel], axis=0 )
lowercase__ = False
else:
lowercase__ = self._random_mel_fusion(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
lowercase__ = False
        # only use "repeat" as a new possible value for padding; the audio is repeated before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, lowerCamelCase ) )
lowercase__ = np.pad(lowerCamelCase, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0 )
if truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
else:
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any], lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], lowerCamelCase : str = None, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, **lowerCamelCase : List[str], ):
'''simple docstring'''
lowercase__ = truncation if truncation is not None else self.truncation
lowercase__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase__ = isinstance(lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase, np.ndarray ):
lowercase__ = np.asarray(lowerCamelCase, dtype=np.floataa )
elif isinstance(lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray(lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
lowercase__ = [
self._get_input_mel(lowerCamelCase, max_length if max_length else self.nb_max_samples, lowerCamelCase, lowerCamelCase )
for waveform in raw_speech
]
lowercase__ = []
lowercase__ = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase )
is_longer.append(lowerCamelCase )
if truncation == "fusion" and sum(lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowercase__ = np.random.randint(0, len(lowerCamelCase ) )
lowercase__ = True
if isinstance(input_mel[0], lowerCamelCase ):
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowercase__ = [[longer] for longer in is_longer]
lowercase__ = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowercase__ = BatchFeature(lowerCamelCase )
if return_tensors is not None:
lowercase__ = input_features.convert_to_tensors(lowerCamelCase )
return input_features
| 671 | 0 |
def a ( lowerCamelCase_ ):
'''simple docstring'''
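    # An isogram is a word with no repeating letters; the check is
    # case-insensitive and rejects non-alphabetic input.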
if not all(x.isalpha() for x in string ):
raise ValueError('''String must only contain alphabetic characters.''' )
lowercase__ = sorted(string.lower() )
return len(lowerCamelCase_ ) == len(set(lowerCamelCase_ ) )
if __name__ == "__main__":
A__ : int = input('Enter a string ').strip()
A__ : Optional[Any] = is_isogram(input_str)
print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 711 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = None
lowercase__ = None
def a ( ):
'''simple docstring'''
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ ):
'''simple docstring'''
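    # Level-order traversal that alternates direction on every level:
    # left-to-right on the first level, right-to-left on the next, and so on.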
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(lowerCamelCase_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 0
return output
def a ( ): # Main function for testing.
'''simple docstring'''
lowercase__ = make_tree()
print(F"""In-order Traversal: {inorder(lowerCamelCase_ )}""" )
print(F"""Pre-order Traversal: {preorder(lowerCamelCase_ )}""" )
print(F"""Post-order Traversal: {postorder(lowerCamelCase_ )}""" , '''\n''' )
print(F"""Height of Tree: {height(lowerCamelCase_ )}""" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(lowerCamelCase_ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(lowerCamelCase_ ) + 1 ):
print(F"""Level {level}:""" , get_nodes_from_left_to_right(lowerCamelCase_ , level=lowerCamelCase_ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 671 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = 1
lowercase__ = 3
lowercase__ = (32, 32)
lowercase__ = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(lowerCamelCase )
return image
@property
def lowercase__ ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
return model
@property
def lowercase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
return model
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
return CLIPTextModel(lowerCamelCase )
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
def extract(*lowerCamelCase : Union[str, Any], **lowerCamelCase : Optional[Any] ):
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int] ):
'''simple docstring'''
lowercase__ = torch.ones([0] )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[str] ):
'''simple docstring'''
self.pixel_values.to(lowerCamelCase )
return self
return Out()
return extract
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.dummy_cond_unet
lowercase__ = DDIMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=lowerCamelCase, set_alpha_to_one=lowerCamelCase, )
lowercase__ = self.dummy_vae
lowercase__ = self.dummy_text_encoder
lowercase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        # assemble the safe Stable Diffusion pipeline around the DDIM scheduler configured above
lowercase__ = StableDiffusionPipeline(
unet=lowerCamelCase, scheduler=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=self.dummy_extractor, )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
lowercase__ = sd_pipe([prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='''np''' )
lowercase__ = output.images
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
lowercase__ = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='''np''', return_dict=lowerCamelCase, )[0]
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.dummy_cond_unet
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase )
lowercase__ = self.dummy_vae
lowercase__ = self.dummy_text_encoder
lowercase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
lowercase__ = StableDiffusionPipeline(
unet=lowerCamelCase, scheduler=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=self.dummy_extractor, )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
lowercase__ = sd_pipe([prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='''np''' )
lowercase__ = output.images
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
lowercase__ = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='''np''', return_dict=lowerCamelCase, )[0]
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''', safety_checker=lowerCamelCase )
assert isinstance(lowerCamelCase, lowerCamelCase )
assert isinstance(pipe.scheduler, lowerCamelCase )
assert pipe.safety_checker is None
lowercase__ = pipe('''example prompt''', num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase )
lowercase__ = StableDiffusionPipeline.from_pretrained(lowerCamelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowercase__ = pipe('''example prompt''', num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''', '''This test requires a GPU''' )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.dummy_cond_unet
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase )
lowercase__ = self.dummy_vae
lowercase__ = self.dummy_text_encoder
lowercase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
lowercase__ = unet.half()
lowercase__ = vae.half()
lowercase__ = bert.half()
# make sure here that pndm scheduler skips prk
lowercase__ = StableDiffusionPipeline(
unet=lowerCamelCase, scheduler=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=self.dummy_extractor, )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = sd_pipe([prompt], num_inference_steps=2, output_type='''np''' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''', safety_checker=lowerCamelCase )
lowercase__ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
lowercase__ = 4_003_660_346
lowercase__ = 7
# without safety guidance (sld_guidance_scale = 0)
lowercase__ = torch.manual_seed(lowerCamelCase )
lowercase__ = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=0, )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
lowercase__ = torch.manual_seed(lowerCamelCase )
lowercase__ = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''', safety_checker=lowerCamelCase )
lowercase__ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = '''padme amidala taking a bath artwork, safe for work, no nudity'''
lowercase__ = 2_734_971_755
lowercase__ = 7
lowercase__ = torch.manual_seed(lowerCamelCase )
lowercase__ = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=0, )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
lowercase__ = torch.manual_seed(lowerCamelCase )
lowercase__ = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
lowercase__ = 1_044_355_234
lowercase__ = 12
lowercase__ = torch.manual_seed(lowerCamelCase )
lowercase__ = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=0, )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
lowercase__ = torch.manual_seed(lowerCamelCase )
lowercase__ = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
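# A minimal sketch of the slice-comparison idiom used throughout these tests:
# `image[0, -3:, -3:, -1]` takes the last channel of the bottom-right 3x3 patch
# of the first image in an NHWC batch, and the assert bounds the maximum absolute
# deviation from reference values. The array and expected values here are synthetic.
import numpy as np

batch = np.zeros((1, 512, 512, 3), dtype=np.float32)
batch[0, -3:, -3:, -1] = 0.25  # stand-in for generated pixel values

image_slice = batch[0, -3:, -3:, -1]
expected_slice = np.full(9, 0.25, dtype=np.float32)
assert image_slice.shape == (3, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2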
| 712 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = DistilBertTokenizer
lowercase__ = DistilBertTokenizerFast
lowercase__ = True
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
lowercase__ = tokenizer.encode('''sequence builders''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''multi-sequence build''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
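# Sketch of the special-token layout asserted above, with placeholder ids in
# place of the real DistilBERT vocabulary: a single [CLS] prefix and a [SEP]
# appended after each sequence.
cls_id, sep_id = 101, 102  # hypothetical special-token ids
text = [7592, 2088]        # hypothetical ids for "sequence builders"
text_2 = [2057, 2024]      # hypothetical ids for "multi-sequence build"

encoded_sentence = [cls_id] + text + [sep_id]
encoded_pair = [cls_id] + text + [sep_id] + text_2 + [sep_id]
assert encoded_pair[: len(encoded_sentence)] == encoded_sentence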
| 671 | 0 |
import math
def a ( lowerCamelCase_ , lowerCamelCase_ = 0 , lowerCamelCase_ = 0 ):
'''simple docstring'''
lowercase__ = end or len(lowerCamelCase_ )
for i in range(lowerCamelCase_ , lowerCamelCase_ ):
lowercase__ = i
lowercase__ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
lowercase__ = array[temp_index - 1]
temp_index -= 1
lowercase__ = temp_index_value
return array
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): # Max Heap
'''simple docstring'''
lowercase__ = index
lowercase__ = 2 * index + 1 # Left Node
lowercase__ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
lowercase__ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
lowercase__ = right_index
if largest != index:
lowercase__ , lowercase__ = array[largest], array[index]
heapify(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = len(lowerCamelCase_ )
for i in range(n // 2 , -1 , -1 ):
heapify(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for i in range(n - 1 , 0 , -1 ):
lowercase__ , lowercase__ = array[0], array[i]
heapify(lowerCamelCase_ , 0 , lowerCamelCase_ )
return array
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = low
lowercase__ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
lowercase__ , lowercase__ = array[j], array[i]
i += 1
def a ( lowerCamelCase_ ):
'''simple docstring'''
if len(lowerCamelCase_ ) == 0:
return array
lowercase__ = 2 * math.ceil(math.loga(len(lowerCamelCase_ ) ) )
lowercase__ = 16
return intro_sort(lowerCamelCase_ , 0 , len(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(lowerCamelCase_ )
max_depth -= 1
lowercase__ = median_of_a(lowerCamelCase_ , lowerCamelCase_ , start + ((end - start) // 2) + 1 , end - 1 )
lowercase__ = partition(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
intro_sort(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = p
return insertion_sort(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
A__ : Tuple = input('Enter numbers separated by a comma : ').strip()
A__ : Union[str, Any] = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
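# Readable reference sketch of the introsort above, with names restored by
# inference from the call sites (`median_of_a` read as a median-of-three pivot,
# `math.loga` as `math.log2`). A sketch under those assumptions, not the
# original module.
import math


def insertion_sort(array, start=0, end=0):
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_value = array[i]
        while temp_index != start and temp_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_value
    return array


def heapify(array, index, heap_size):  # max-heap sift-down
    largest = index
    left, right = 2 * index + 1, 2 * index + 2
    if left < heap_size and array[largest] < array[left]:
        largest = left
    if right < heap_size and array[largest] < array[right]:
        largest = right
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_3(array, first, middle, last):
    if (array[first] > array[middle]) != (array[first] > array[last]):
        return array[first]
    if (array[middle] > array[first]) != (array[middle] > array[last]):
        return array[middle]
    return array[last]


def partition(array, low, high, pivot):  # Hoare-style scan
    i, j = low, high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def intro_sort(array, start, end, size_threshold, max_depth):
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


def introsort(array):
    if not array:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    return intro_sort(array, 0, len(array), 16, max_depth)


assert introsort([4, 1, 9, 7, 3, 3]) == [1, 3, 3, 4, 7, 9]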
| 713 |
from __future__ import annotations
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
if resistor <= 0:
lowercase__ = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(lowerCamelCase_ )
first_sum += 1 / float(lowerCamelCase_ )
index += 1
return 1 / first_sum
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase__ = F"""Resistor at index {index} has a negative value!"""
raise ValueError(lowerCamelCase_ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
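# Readable sketch of the two helpers above, with names assumed from behaviour:
# equivalent parallel resistance is the reciprocal of the summed reciprocals,
# series resistance is the plain sum; non-positive values are rejected first.
def resistor_parallel(resistors):
    first_sum = 0.0
    for index, resistor in enumerate(resistors):
        if resistor <= 0:
            raise ValueError(f"Resistor at index {index} has a negative or zero value!")
        first_sum += 1 / float(resistor)
    return 1 / first_sum


def resistor_series(resistors):
    for index, resistor in enumerate(resistors):
        if resistor < 0:
            raise ValueError(f"Resistor at index {index} has a negative value!")
    return float(sum(resistors))


assert resistor_series([5, 10, 15]) == 30.0
assert abs(resistor_parallel([10.0, 10.0]) - 5.0) < 1e-9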
| 671 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
A__ : Dict = None
A__ : int = logging.get_logger(__name__)
A__ : int = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
A__ : str = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
A__ : Any = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
A__ : Optional[int] = '▁'
# Segments (not really needed)
A__ : Tuple = 0
A__ : Tuple = 1
A__ : Dict = 2
A__ : Any = 3
A__ : Any = 4
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = """left"""
lowercase__ = XLNetTokenizer
def __init__( self : Optional[Any], lowerCamelCase : Tuple=None, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : Tuple=False, lowerCamelCase : List[Any]=True, lowerCamelCase : Optional[int]=False, lowerCamelCase : Optional[int]="<s>", lowerCamelCase : Any="</s>", lowerCamelCase : Dict="<unk>", lowerCamelCase : Optional[Any]="<sep>", lowerCamelCase : Optional[Any]="<pad>", lowerCamelCase : List[Any]="<cls>", lowerCamelCase : Optional[int]="<mask>", lowerCamelCase : List[Any]=["<eop>", "<eod>"], **lowerCamelCase : List[str], ):
'''simple docstring'''
lowercase__ = AddedToken(lowerCamelCase, lstrip=lowerCamelCase, rstrip=lowerCamelCase ) if isinstance(lowerCamelCase, lowerCamelCase ) else mask_token
super().__init__(
vocab_file=lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, remove_space=lowerCamelCase, keep_accents=lowerCamelCase, bos_token=lowerCamelCase, eos_token=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, additional_special_tokens=lowerCamelCase, **lowerCamelCase, )
lowercase__ = 3
lowercase__ = do_lower_case
lowercase__ = remove_space
lowercase__ = keep_accents
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowercase__ ( self : Optional[int], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowercase__ ( self : List[Any], lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
copyfile(self.vocab_file, lowerCamelCase )
return (out_vocab_file,)
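# Sketch of XLNet's special-token layout as built above, using placeholder ids:
# unlike BERT, the separator and classifier tokens are appended at the END, and
# the CLS position carries its own segment id (2).
sep, cls = [5], [3]            # hypothetical sep/cls token ids
seq_a, seq_b = [11, 12], [21]  # hypothetical content token ids

single = seq_a + sep + cls
pair = seq_a + sep + seq_b + sep + cls
segments_single = len(seq_a + sep) * [0] + [2]
segments_pair = len(seq_a + sep) * [0] + len(seq_b + sep) * [1] + [2]

assert len(single) == len(segments_single)
assert len(pair) == len(segments_pair)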
| 714 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('''RGB''' )
lowercase__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
lowercase__ = transform(lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
return image
def a ( lowerCamelCase_ ):
'''simple docstring'''
if "visual_encoder" in key:
lowercase__ = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , lowerCamelCase_ )
if "blocks" in key:
lowercase__ = re.sub(r'''blocks''' , '''layers''' , lowerCamelCase_ )
if "attn" in key:
lowercase__ = re.sub(r'''attn''' , '''self_attn''' , lowerCamelCase_ )
if "norm1" in key:
lowercase__ = re.sub(r'''norm1''' , '''layer_norm1''' , lowerCamelCase_ )
if "norm2" in key:
lowercase__ = re.sub(r'''norm2''' , '''layer_norm2''' , lowerCamelCase_ )
if "encoder.norm" in key:
lowercase__ = re.sub(r'''encoder.norm''' , '''post_layernorm''' , lowerCamelCase_ )
if "encoder.patch_embed.proj" in key:
lowercase__ = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , lowerCamelCase_ )
if "encoder.pos_embed" in key:
lowercase__ = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , lowerCamelCase_ )
if "encoder.cls_token" in key:
lowercase__ = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , lowerCamelCase_ )
if "self_attn" in key:
lowercase__ = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , lowerCamelCase_ )
return key
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
if config_path is not None:
lowercase__ = BlipConfig.from_pretrained(lowerCamelCase_ )
else:
lowercase__ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowercase__ = BlipForConditionalGeneration(lowerCamelCase_ ).eval()
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase__ = blip_decoder(pretrained=lowerCamelCase_ , image_size=384 , vit='''base''' )
lowercase__ = pt_model.eval()
lowercase__ = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
hf_model.load_state_dict(lowerCamelCase_ )
lowercase__ = 384
lowercase__ = load_demo_image(image_size=lowerCamelCase_ , device='''cpu''' )
lowercase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase__ = tokenizer(['''a picture of'''] ).input_ids
lowercase__ = hf_model.generate(lowerCamelCase_ , lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase__ = hf_model.generate(lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCamelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase__ = blip_vqa(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
vqa_model.eval()
lowercase__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForQuestionAnswering(lowerCamelCase_ )
hf_vqa_model.load_state_dict(lowerCamelCase_ )
lowercase__ = ['''How many dogs are in this image?''']
lowercase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' ).input_ids
lowercase__ = hf_vqa_model.generate(lowerCamelCase_ , lowerCamelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase__ = blip_itm(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
itm_model.eval()
lowercase__ = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForImageTextRetrieval(lowerCamelCase_ )
lowercase__ = ['''A picture of a woman with a dog sitting in a beach''']
lowercase__ = tokenizer(
lowerCamelCase_ , return_tensors='''pt''' , padding='''max_length''' , truncation=lowerCamelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCamelCase_ )
hf_itm_model.eval()
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
A__ : List[Any] = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
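# Quick illustration of the key renaming above: each `re.sub` rewrites one piece
# of the original BLIP naming into the Hugging Face convention. The sample key
# is invented for demonstration.
import re

key = "visual_encoder.blocks.0.attn.qkv.weight"
key = re.sub(r"visual_encoder*", "vision_model.encoder", key)
key = re.sub(r"blocks", "layers", key)
key = re.sub(r"attn", "self_attn", key)
assert key == "vision_model.encoder.layers.0.self_attn.qkv.weight"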
| 671 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = (
"""This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."""
"""It takes two arguments named `image` which should be the original image, and `label` which should be a text """
"""describing the elements what should be identified in the segmentation mask. The tool returns the mask."""
)
lowercase__ = """CIDAS/clipseg-rd64-refined"""
lowercase__ = """image_segmenter"""
lowercase__ = CLIPSegForImageSegmentation
lowercase__ = ["""image""", """text"""]
lowercase__ = ["""image"""]
def __init__( self : Optional[Any], *lowerCamelCase : Tuple, **lowerCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(self, ['''vision'''] )
super().__init__(*lowerCamelCase, **lowerCamelCase )
def lowercase__ ( self : Optional[Any], lowerCamelCase : "Image", lowerCamelCase : str ):
'''simple docstring'''
return self.pre_processor(text=[label], images=[image], padding=lowerCamelCase, return_tensors='''pt''' )
def lowercase__ ( self : Any, lowerCamelCase : Optional[int] ):
'''simple docstring'''
with torch.no_grad():
lowercase__ = self.model(**lowerCamelCase ).logits
return logits
def lowercase__ ( self : str, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = outputs.cpu().detach().numpy()
lowercase__ = 0
lowercase__ = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
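# Sketch of the mask post-processing above: the flattened assignments appear to
# threshold the logits at zero into a binary {0, 1} array, and `np.uinta` stands
# for `np.uint8` under the file's digit mangling. The logits below are synthetic.
import numpy as np
from PIL import Image

logits = np.array([[-1.2, 0.3], [2.0, -0.5]])
mask = (logits > 0).astype(np.uint8)
segmentation = Image.fromarray(mask * 255)  # white pixels mark the label

assert mask.tolist() == [[0, 1], [1, 0]]
assert segmentation.size == (2, 2)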
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : int=30, lowerCamelCase : Tuple=400, lowerCamelCase : Any=True, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''shortest_edge''': 20}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def lowercase__ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''crop_size''' ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Any ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
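# Small sketch of the shape contract these tests assert: after resizing to the
# shortest edge and taking a centre crop, outputs are (batch, channels, crop_h,
# crop_w) regardless of the input resolution. Values mirror the defaults above.
crop_size = {"height": 18, "width": 18}
single = (1, 3, crop_size["height"], crop_size["width"])
batched = (7, 3, crop_size["height"], crop_size["width"])  # batch_size=7 above
assert single == (1, 3, 18, 18) and batched == (7, 3, 18, 18)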
| 671 | 0 |
import os
def a ( ):
'''simple docstring'''
lowercase__ = os.path.dirname(os.path.realpath(lowerCamelCase_ ) )
lowercase__ = os.path.join(lowerCamelCase_ , '''triangle.txt''' )
with open(lowerCamelCase_ ) as f:
lowercase__ = f.readlines()
lowercase__ = []
for line in triangle:
lowercase__ = []
for number in line.strip().split(''' ''' ):
numbers_from_line.append(int(lowerCamelCase_ ) )
a.append(lowerCamelCase_ )
for i in range(1 , len(lowerCamelCase_ ) ):
for j in range(len(a[i] ) ):
lowercase__ = a[i - 1][j] if j != len(a[i - 1] ) else 0
lowercase__ = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(lowerCamelCase_ , lowerCamelCase_ )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
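# Readable sketch of the dynamic programme above: each cell accumulates the best
# path from the row above, so the answer is the maximum of the last row. The
# inline triangle stands in for the `triangle.txt` file read.
triangle = [
    [3],
    [7, 4],
    [2, 4, 6],
    [8, 5, 9, 3],
]
for i in range(1, len(triangle)):
    for j in range(len(triangle[i])):
        above_right = triangle[i - 1][j] if j != len(triangle[i - 1]) else 0
        above_left = triangle[i - 1][j - 1] if j > 0 else 0
        triangle[i][j] += max(above_right, above_left)

assert max(triangle[-1]) == 23  # best path is 3 + 7 + 4 + 9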
| 716 |
import argparse
import os
import re
A__ : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
A__ : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
A__ : List[str] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A__ : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
A__ : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A__ : Tuple = re.compile(r'\[([^\]]+)\]')
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _re_indent.search(lowerCamelCase_ )
return "" if search is None else search.groups()[0]
def a ( lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase_ ):
index += 1
lowercase__ = ['''\n'''.join(lines[:index] )]
else:
lowercase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ = [lines[index]]
index += 1
while index < len(lowerCamelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCamelCase_ ) )
if index < len(lowerCamelCase_ ) - 1:
lowercase__ = [lines[index + 1]]
index += 1
else:
lowercase__ = []
else:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
lowercase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def a ( lowerCamelCase_ ):
'''simple docstring'''
def _inner(lowerCamelCase_ ):
return key(lowerCamelCase_ ).lower().replace('''_''' , '''''' )
return _inner
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(lowerCamelCase_ ):
return x
if key is None:
lowercase__ = noop
# Constants are all uppercase, they go first.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ )[0].isupper() and not key(lowerCamelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ = [obj for obj in objects if not key(lowerCamelCase_ )[0].isupper()]
lowercase__ = ignore_underscore(lowerCamelCase_ )
return sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
# This inner function sort imports between [ ].
def _replace(lowerCamelCase_ ):
lowercase__ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] ) + "]"
lowercase__ = import_statement.split('''\n''' )
if len(lowerCamelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ = 2 if lines[1].strip() == '''[''' else 1
lowercase__ = [(i, _re_strip_line.search(lowerCamelCase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowercase__ = sort_objects(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )
lowercase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
lowercase__ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] )
return "\n".join(lowerCamelCase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase__ = _re_bracket_content.sub(_replace , lowerCamelCase_ )
return import_statement
def a ( lowerCamelCase_ , lowerCamelCase_=True ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase__ = split_code_in_indented_blocks(
lowerCamelCase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase__ = main_blocks[block_idx]
lowercase__ = block.split('''\n''' )
# Get to the start of the imports.
lowercase__ = 0
while line_idx < len(lowerCamelCase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase__ = len(lowerCamelCase_ )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase__ = '''\n'''.join(block_lines[line_idx:-1] )
lowercase__ = get_indent(block_lines[1] )
    # Split the internal block into blocks of indent level 1.
lowercase__ = split_code_in_indented_blocks(lowerCamelCase_ , indent_level=lowerCamelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase__ = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase__ = [(pattern.search(lowerCamelCase_ ).groups()[0] if pattern.search(lowerCamelCase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase__ = [(i, key) for i, key in enumerate(lowerCamelCase_ ) if key is not None]
lowercase__ = [x[0] for x in sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowercase__ = 0
lowercase__ = []
for i in range(len(lowerCamelCase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowercase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCamelCase_ )
count += 1
# And we put our main block back together with its first and last line.
lowercase__ = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCamelCase_ ) )
def a ( lowerCamelCase_=True ):
'''simple docstring'''
lowercase__ = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
lowercase__ = sort_imports(os.path.join(lowerCamelCase_ , '''__init__.py''' ) , check_only=lowerCamelCase_ )
if result:
lowercase__ = [os.path.join(lowerCamelCase_ , '''__init__.py''' )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowerCamelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A__ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
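# Tiny illustration of the ordering rule implemented by `sort_objects` above:
# uppercase constants first, CamelCase classes second, lowercase functions last,
# with underscores ignored inside each group. Names below are made up.
def sort_names(names):
    key = lambda name: name.lower().replace("_", "")
    constants = [n for n in names if n.isupper()]
    classes = [n for n in names if n[0].isupper() and not n.isupper()]
    functions = [n for n in names if not n[0].isupper()]
    return sorted(constants, key=key) + sorted(classes, key=key) + sorted(functions, key=key)

assert sort_names(["load_model", "BertModel", "BERT_CONSTANT", "AutoModel"]) == [
    "BERT_CONSTANT", "AutoModel", "BertModel", "load_model"
]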
| 671 | 0 |
import operator
def a ( lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = None ):
'''simple docstring'''
lowercase__ = operator.lt if reverse else operator.gt
lowercase__ = solution or []
if not arr:
return solution
lowercase__ = [arr.pop(0 )]
for i, item in enumerate(lowerCamelCase_ ):
if _operator(lowerCamelCase_ , sublist[-1] ):
sublist.append(lowerCamelCase_ )
arr.pop(lowerCamelCase_ )
# merging sublist into solution list
if not solution:
solution.extend(lowerCamelCase_ )
else:
while sublist:
lowercase__ = sublist.pop(0 )
for i, xx in enumerate(lowerCamelCase_ ):
if not _operator(lowerCamelCase_ , lowerCamelCase_ ):
solution.insert(lowerCamelCase_ , lowerCamelCase_ )
break
else:
solution.append(lowerCamelCase_ )
strand_sort(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
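# Readable sketch of strand sort as implemented above: repeatedly pull an
# ascending "strand" out of the input and merge it into the running solution.
# Names are reconstructions inferred from the call sites, not the original code.
import operator


def strand_sort(arr, reverse=False, solution=None):
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, existing in enumerate(solution):
                if not _operator(item, existing):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution


assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]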
| 717 |
from math import sqrt
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
lowercase__ = True
# 0 and 1 are none primes.
if number <= 1:
lowercase__ = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowercase__ = False
break
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'status' must been from type bool"
return status
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase__ = list(range(2 , n + 1 ) )
    lowercase__ = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(lowerCamelCase_ ) ):
for j in range(i + 1 , len(lowerCamelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase__ = 0
# filters actual prime numbers.
lowercase__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
lowercase__ = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCamelCase_ ):
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and number >= 0, "'number' must been an int and >= 0"
    lowercase__ = [] # this list will be returned by the function.
# potential prime number factors.
lowercase__ = 2
lowercase__ = number
if number == 0 or number == 1:
ans.append(lowerCamelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCamelCase_ ):
while quotient != 1:
if is_prime(lowerCamelCase_ ) and (quotient % factor == 0):
ans.append(lowerCamelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = max(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = min(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 == 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 != 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (number > 2) and is_even(lowerCamelCase_ )
), "'number' must been an int, even and > 2"
    lowercase__ = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
lowercase__ = get_prime_numbers(lowerCamelCase_ )
lowercase__ = len(lowerCamelCase_ )
# run variable for while-loops.
lowercase__ = 0
lowercase__ = None
    # exit variable, used to break out of the loops
lowercase__ = True
while i < len_pn and loop:
lowercase__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (len(lowerCamelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 0
while numbera != 0:
lowercase__ = numbera % numbera
lowercase__ = numbera
lowercase__ = rest
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = prime_factorization(lowerCamelCase_ )
elif numbera == 1 or numbera == 1:
lowercase__ = []
lowercase__ = []
lowercase__ = max(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = 0
lowercase__ = 0
    lowercase__ = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(max(lowerCamelCase_ , lowerCamelCase_ ) ):
ans *= n
else:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'number' must been a positive int"
lowercase__ = 0
lowercase__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCamelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and is_prime(
lowerCamelCase_ ), "'ans' must been a prime number and from type int"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
is_prime(lowerCamelCase_ ) and is_prime(lowerCamelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase__ = p_number_a + 1 # jump to the next number
    lowercase__ = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCamelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
    # 'ans' does not contain 'pNumber1' or 'pNumber2'!
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 1), "'n' must been int and >= 1"
lowercase__ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase__ = get_divisors(lowerCamelCase_ )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase__ = gcd(abs(lowerCamelCase_ ) , abs(lowerCamelCase_ ) )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
    lowercase__ = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
lowercase__ = 0
lowercase__ = 1
    lowercase__ = 1 # this will be returned
for _ in range(n - 1 ):
lowercase__ = ans
ans += fiba
lowercase__ = tmp
return ans
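# Readable sketch of two of the helpers above; the obfuscation collapses distinct
# names (e.g. `isinstance(lowerCamelCase_, lowerCamelCase_)` stands for
# `isinstance(number, int)`), so the versions below are reconstructions, not the
# original code. Trial division only, fine for small inputs.
from math import sqrt


def is_prime(number):
    assert isinstance(number, int) and number >= 0, "'number' must be a non-negative int"
    if number <= 1:
        return False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        if number % divisor == 0:
            return False
    return True


def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must be a non-negative int"
    if number in (0, 1):
        return [number]
    factors = []
    factor = 2
    quotient = number
    while quotient != 1:
        if is_prime(factor) and quotient % factor == 0:
            factors.append(factor)
            quotient //= factor
        else:
            factor += 1
    return factors


assert is_prime(97) and not is_prime(1)
assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]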
| 671 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A__ : List[str] = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = ['DeiTFeatureExtractor']
A__ : str = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Dict = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Any = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
A__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
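# Illustrative usage sketch (assumption, not part of the module above): with the
# _LazyModule pattern, heavy submodules are only imported when an attribute is
# first resolved, so the import below stays cheap until DeiTModel is touched.
#
#     from transformers import DeiTModel  # triggers modeling_deit lazily
#     model = DeiTModel.from_pretrained("facebook/deit-base-distilled-patch16-224")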
| 718 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = args.log_outputs
lowercase__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowercase__ = load_metric('''wer''' )
lowercase__ = load_metric('''cer''' )
# compute metrics
lowercase__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowercase__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowercase__ = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCamelCase_ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCamelCase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowercase__ = F"""log_{dataset_id}_predictions.txt"""
lowercase__ = F"""log_{dataset_id}_targets.txt"""
with open(lowerCamelCase_ , '''w''' ) as p, open(lowerCamelCase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCamelCase_ , lowerCamelCase_ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCamelCase_ , with_indices=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase__ = re.sub(lowerCamelCase_ , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
    lowercase__ = ['''\n\n''', '''\n''', '''   ''', '''  ''']
for t in token_sequences_to_ignore:
lowercase__ = ''' '''.join(text.split(lowerCamelCase_ ) )
return text
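# Worked example (illustrative): normalize_text("Hello, World!\n\nBye.") lowercases
# and strips the ignored punctuation -> "hello world\n\nbye", then the split/join
# passes collapse the newline runs -> "hello world bye".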
def a ( lowerCamelCase_ ):
'''simple docstring'''
# load dataset
lowercase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase_ )
    # for testing: only process the first few examples as a test
# dataset = dataset.select(range(10))
# load processor
lowercase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase__ = feature_extractor.sampling_rate
# resample audio
lowercase__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCamelCase_ ) )
# load eval pipeline
if args.device is None:
lowercase__ = 0 if torch.cuda.is_available() else -1
lowercase__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCamelCase_ ):
lowercase__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase__ = prediction['''text''']
lowercase__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowercase__ = dataset.map(lowerCamelCase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
| 671 | 0 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
A__ : Tuple = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Optional[int], lowerCamelCase : Any=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : List[str]=30, lowerCamelCase : Optional[Any]=400, lowerCamelCase : List[Any]=None, lowerCamelCase : Dict=True, lowerCamelCase : int=True, lowerCamelCase : Dict=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''height''': 20, '''width''': 20}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = size
lowercase__ = do_normalize
lowercase__ = do_convert_rgb
lowercase__ = [512, 1_024, 2_048, 4_096]
lowercase__ = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def lowercase__ ( self : Dict ):
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 ,reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" ,)
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = PixaStructImageProcessingTester(self )
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_convert_rgb''' ) )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.image_processor_tester.prepare_dummy_image()
lowercase__ = self.image_processing_class(**self.image_processor_dict )
lowercase__ = 2_048
lowercase__ = image_processor(lowerCamelCase, return_tensors='''pt''', max_patches=lowerCamelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606 ), atol=1E-3, rtol=1E-3 ) )
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowercase__ = image_processor(
image_inputs[0], return_tensors='''pt''', max_patches=lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
lowercase__ = image_processor(
lowerCamelCase, return_tensors='''pt''', max_patches=lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
lowercase__ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(lowerCamelCase ):
lowercase__ = image_processor(
image_inputs[0], return_tensors='''pt''', max_patches=lowerCamelCase ).flattened_patches
lowercase__ = '''Hello'''
lowercase__ = image_processor(
image_inputs[0], return_tensors='''pt''', max_patches=lowerCamelCase, header_text=lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
lowercase__ = image_processor(
lowerCamelCase, return_tensors='''pt''', max_patches=lowerCamelCase, header_text=lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
lowercase__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowercase__ = image_processor(
image_inputs[0], return_tensors='''pt''', max_patches=lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
lowercase__ = image_processor(
lowerCamelCase, return_tensors='''pt''', max_patches=lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowercase__ = image_processor(
image_inputs[0], return_tensors='''pt''', max_patches=lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
lowercase__ = image_processor(
lowerCamelCase, return_tensors='''pt''', max_patches=lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 ,reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" ,)
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = PixaStructImageProcessingTester(self, num_channels=4 )
lowercase__ = 3
@property
def lowercase__ ( self : Any ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_convert_rgb''' ) )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowercase__ = image_processor(
image_inputs[0], return_tensors='''pt''', max_patches=lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
lowercase__ = image_processor(
lowerCamelCase, return_tensors='''pt''', max_patches=lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
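# Worked example (illustrative): with the defaults above, patch_size is 16x16 and
# num_channels is 3, so each flattened patch carries 16 * 16 * 3 + 2 = 770 values;
# the extra 2 are the (row, column) coordinates prepended to every patch.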
| 719 |
from functools import reduce
A__ : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def a ( lowerCamelCase_ = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCamelCase_ , lowerCamelCase_ : str(int(lowerCamelCase_ ) * int(lowerCamelCase_ ) ) , n[i : i + 13] ) )
for i in range(len(lowerCamelCase_ ) - 12 ) )
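# Illustrative alternative (assumption, not part of the solution above): the same
# answer without `reduce`, using a plain product over each 13-digit window of the
# 1000-digit string.
#
#     def _solution_sketch(n: str = N) -> int:
#         best = 0
#         for i in range(len(n) - 12):
#             product = 1
#             for digit in n[i : i + 13]:
#                 product *= int(digit)
#             best = max(best, product)
#         return best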
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 0 |
from math import sqrt
def a ( lowerCamelCase_ = 100_0000 ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = 0
lowercase__ = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(lowerCamelCase_ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
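# Worked example (illustrative): for max_cuboid_size M = 3 and sum_shortest_sides
# s = 4, the valid (a, b) pairs with a + b = 4 and a <= b <= 3 are (1, 3) and
# (2, 2); indeed min(M, s // 2) - max(1, s - M) + 1 = 2 - 1 + 1 = 2.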
if __name__ == "__main__":
print(F"{solution() = }")
| 720 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = 1
@register_to_config
def __init__( self : Union[str, Any], lowerCamelCase : int = 2_000, lowerCamelCase : float = 0.15, lowerCamelCase : float = 0.01, lowerCamelCase : float = 1348.0, lowerCamelCase : float = 1E-5, lowerCamelCase : int = 1, ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ = sigma_max
# setable values
lowercase__ = None
self.set_sigmas(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
return sample
def lowercase__ ( self : Dict, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : Union[str, torch.device] = None ):
'''simple docstring'''
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowercase__ = torch.linspace(1, lowerCamelCase, lowerCamelCase, device=lowerCamelCase )
def lowercase__ ( self : str, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : float = None, lowerCamelCase : float = None ):
'''simple docstring'''
lowercase__ = sigma_min if sigma_min is not None else self.config.sigma_min
lowercase__ = sigma_max if sigma_max is not None else self.config.sigma_max
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCamelCase, lowerCamelCase )
lowercase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowercase__ = torch.exp(torch.linspace(math.log(lowerCamelCase ), math.log(lowerCamelCase ), lowerCamelCase ) )
lowercase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
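    # Illustrative numbers (assumption): with the defaults sigma_min=0.01 and
    # sigma_max=1348, the schedule above interpolates geometrically as
    # sigma(t) = sigma_min * (sigma_max / sigma_min) ** t, so e.g.
    # sigma(0.5) = 0.01 * 134800 ** 0.5, roughly 3.67.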
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : str ):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def lowercase__ ( self : Tuple, lowerCamelCase : torch.FloatTensor, lowerCamelCase : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowercase__ = timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowercase__ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowercase__ = timesteps.to(self.discrete_sigmas.device )
lowercase__ = self.discrete_sigmas[timesteps].to(sample.device )
lowercase__ = self.get_adjacent_sigma(lowerCamelCase, lowerCamelCase ).to(sample.device )
lowercase__ = torch.zeros_like(lowerCamelCase )
lowercase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowercase__ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowercase__ = diffusion.unsqueeze(-1 )
lowercase__ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowercase__ = randn_tensor(
sample.shape, layout=sample.layout, generator=lowerCamelCase, device=sample.device, dtype=sample.dtype )
lowercase__ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowercase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCamelCase, prev_sample_mean=lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowercase__ = randn_tensor(sample.shape, layout=sample.layout, generator=lowerCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowercase__ = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowercase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowercase__ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowercase__ = step_size.unsqueeze(-1 )
lowercase__ = sample + step_size * model_output
lowercase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, ):
'''simple docstring'''
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowercase__ = timesteps.to(original_samples.device )
lowercase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowercase__ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCamelCase ) * sigmas[:, None, None, None]
)
lowercase__ = noise + original_samples
return noisy_samples
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
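# Illustrative predictor-corrector loop (assumption; `model` is an external score
# network, not defined here, and names follow the diffusers ScoreSdeVe pipeline):
#
#     scheduler.set_timesteps(num_inference_steps)
#     scheduler.set_sigmas(num_inference_steps)
#     sample = torch.randn(shape) * scheduler.config.sigma_max
#     for i, t in enumerate(scheduler.timesteps):
#         sigma_t = scheduler.sigmas[i] * torch.ones(shape[0])
#         for _ in range(scheduler.config.correct_steps):
#             sample = scheduler.step_correct(model(sample, sigma_t).sample, sample).prev_sample
#         sample = scheduler.step_pred(model(sample, sigma_t).sample, t, sample).prev_sample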
| 671 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
A__ : Dict = 50_00_00
A__ : str = os.path.split(__file__)
A__ : Optional[Any] = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.map(**lowerCamelCase_ )
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.filter(**lowerCamelCase_ )
def a ( ):
'''simple docstring'''
lowercase__ = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
lowercase__ = generate_example_dataset(
os.path.join(lowerCamelCase_ , '''dataset.arrow''' ) , lowerCamelCase_ , num_examples=lowerCamelCase_ )
lowercase__ = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowerCamelCase_ )
def tokenize(lowerCamelCase_ ):
return tokenizer(examples['''text'''] )
lowercase__ = map(lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''numpy''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''pandas''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = filter(lowerCamelCase_ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowerCamelCase_ , '''wb''' ) as f:
f.write(json.dumps(lowerCamelCase_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 721 |
from collections import defaultdict
from math import gcd
def a ( lowerCamelCase_ = 150_0000 ):
'''simple docstring'''
lowercase__ = defaultdict(lowerCamelCase_ )
lowercase__ = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , lowerCamelCase_ , 2 ):
if gcd(lowerCamelCase_ , lowerCamelCase_ ) > 1:
continue
lowercase__ = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(lowerCamelCase_ , limit + 1 , lowerCamelCase_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
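# Illustrative check (assumption, not part of the solution above): Euclid's formula
# yields a primitive Pythagorean triple (m^2 - n^2, 2mn, m^2 + n^2) for coprime
# m > n of opposite parity, with perimeter 2m(m + n); m=2, n=1 gives (3, 4, 5)
# with perimeter 12 = 2 * 2 * (2 + 1).
def _euclid_triple(m: int, n: int) -> tuple[int, int, int]:
    return (m * m - n * n, 2 * m * n, m * m + n * n)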
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 0 |
'''simple docstring'''
import requests
a : str = """YOUR API KEY"""
def __lowerCamelCase ( _lowercase , _lowercase = giphy_api_key ) -> list:
UpperCAmelCase : Union[str, Any] = """+""".join(query.split() )
UpperCAmelCase : List[Any] = F'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
UpperCAmelCase : List[Any] = requests.get(_lowercase ).json()["""data"""]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 672 |
'''simple docstring'''
import math
def __lowerCamelCase ( _lowercase ) -> bool:
assert isinstance(_lowercase , _lowercase ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
UpperCAmelCase : str = range(3 , int(math.sqrt(_lowercase ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def __lowerCamelCase ( _lowercase , _lowercase=1 , **_lowercase ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = factor * value
UpperCAmelCase : List[Any] = value
while not is_prime(_lowercase ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **_lowercase )
return value
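# Illustrative behaviour (assumption): with factor=1 and no `desc` flag, the helper
# above returns the smallest prime strictly greater than `value`; e.g. starting
# from 8 it walks 8 -> 9 -> 10 -> 11 and yields 11, and starting exactly on the
# prime 7 the `value == first_value_val` branch recurses from 8 and also yields 11.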
| 672 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=False , A=True , A="None" , A=3 , A=4 , A=None , ) -> str:
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : str = seq_length
UpperCAmelCase : List[Any] = is_training
UpperCAmelCase : Any = use_input_mask
UpperCAmelCase : Dict = use_token_type_ids
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : str = vocab_size
UpperCAmelCase : str = hidden_size
UpperCAmelCase : Optional[Any] = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : Optional[int] = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : str = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : List[Any] = type_sequence_label_size
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : List[str] = num_labels
UpperCAmelCase : List[Any] = num_choices
UpperCAmelCase : Optional[Any] = relative_attention
UpperCAmelCase : int = position_biased_input
UpperCAmelCase : Union[str, Any] = pos_att_type
UpperCAmelCase : Tuple = scope
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : str = None
if self.use_input_mask:
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Any = None
UpperCAmelCase : Any = None
UpperCAmelCase : Dict = None
if self.use_labels:
UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase( self ) -> List[Any]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _lowercase( self , A ) -> List[str]:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _lowercase( self , A , A , A , A , A , A , A ) -> List[Any]:
UpperCAmelCase : Tuple = DebertaVaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , token_type_ids=A )[0]
UpperCAmelCase : List[str] = model(A , token_type_ids=A )[0]
UpperCAmelCase : Union[str, Any] = model(A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _lowercase( self , A , A , A , A , A , A , A ) -> int:
UpperCAmelCase : str = DebertaVaForMaskedLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[str] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A ) -> Tuple:
UpperCAmelCase : str = self.num_labels
UpperCAmelCase : int = DebertaVaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(A )
def _lowercase( self , A , A , A , A , A , A , A ) -> List[Any]:
UpperCAmelCase : Any = self.num_labels
UpperCAmelCase : int = DebertaVaForTokenClassification(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[str] = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase( self , A , A , A , A , A , A , A ) -> List[str]:
UpperCAmelCase : Optional[Any] = DebertaVaForQuestionAnswering(config=A )
model.to(A )
model.eval()
UpperCAmelCase : str = model(
A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase( self , A , A , A , A , A , A , A ) -> Any:
UpperCAmelCase : str = DebertaVaForMultipleChoice(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : Any = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs
UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase = (
{
'feature-extraction': DebertaVaModel,
'fill-mask': DebertaVaForMaskedLM,
'question-answering': DebertaVaForQuestionAnswering,
'text-classification': DebertaVaForSequenceClassification,
'token-classification': DebertaVaForTokenClassification,
'zero-shot': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = True
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> str:
UpperCAmelCase : Dict = DebertaVaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> List[str]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*A )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*A )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*A )
def _lowercase( self ) -> str:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*A )
@slow
def _lowercase( self ) -> Tuple:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = DebertaVaModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def _lowercase( self ) -> List[str]:
pass
@slow
def _lowercase( self ) -> Dict:
UpperCAmelCase : List[Any] = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
UpperCAmelCase : Union[str, Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
UpperCAmelCase : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A )[0]
# compare the actual values for a slice.
UpperCAmelCase : List[str] = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A , atol=1e-4 ) , f'''{output[:, 1:4, 1:4]}''' )
| 672 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Union[str, Any] = set()
# Replace all the whitespace in our sentence
UpperCAmelCase : List[str] = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(_lowercase ) == 2_6
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Tuple = [False] * 2_6
for char in input_str:
if char.islower():
UpperCAmelCase : Any = True
elif char.isupper():
UpperCAmelCase : Union[str, Any] = True
return all(_lowercase )
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
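# Equivalent set-containment sketch (assumption, not benchmarked below):
#
#     from string import ascii_lowercase
#
#     def is_pangram_set(input_str: str) -> bool:
#         return set(ascii_lowercase) <= set(input_str.lower())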
def __lowerCamelCase ( ) -> None:
from timeit import timeit
UpperCAmelCase : str = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
print(timeit("""is_pangram()""" , setup=_lowercase ) )
print(timeit("""is_pangram_faster()""" , setup=_lowercase ) )
print(timeit("""is_pangram_fastest()""" , setup=_lowercase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 672 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a : int = logging.get_logger(__name__)
a : Optional[Any] = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
for attribute in key.split(""".""" ):
UpperCAmelCase : Tuple = getattr(_lowercase , _lowercase )
if weight_type is not None:
UpperCAmelCase : Optional[Any] = getattr(_lowercase , _lowercase ).shape
else:
UpperCAmelCase : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
UpperCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
UpperCAmelCase : Dict = value
elif weight_type == "weight_v":
UpperCAmelCase : List[Any] = value
elif weight_type == "bias":
UpperCAmelCase : Union[str, Any] = value
else:
UpperCAmelCase : Dict = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Any:
UpperCAmelCase : int = []
UpperCAmelCase : List[Any] = fairseq_model.state_dict()
UpperCAmelCase : Optional[int] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == """group""" , )
UpperCAmelCase : int = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase : List[str] = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
UpperCAmelCase : Optional[int] = True
if "*" in mapped_key:
UpperCAmelCase : str = name.split(_lowercase )[0].split(""".""" )[-2]
UpperCAmelCase : Optional[int] = mapped_key.replace("""*""" , _lowercase )
if "weight_g" in name:
UpperCAmelCase : Optional[int] = """weight_g"""
elif "weight_v" in name:
UpperCAmelCase : Union[str, Any] = """weight_v"""
elif "weight" in name:
UpperCAmelCase : Any = """weight"""
elif "bias" in name:
UpperCAmelCase : Any = """bias"""
else:
UpperCAmelCase : Tuple = None
set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
continue
if not is_used:
unused_weights.append(_lowercase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
UpperCAmelCase : Tuple = full_name.split("""conv_layers.""" )[-1]
UpperCAmelCase : Optional[int] = name.split(""".""" )
UpperCAmelCase : Union[str, Any] = int(items[0] )
UpperCAmelCase : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
UpperCAmelCase : str = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
UpperCAmelCase : int = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
UpperCAmelCase : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
UpperCAmelCase : List[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowercase )
def __lowerCamelCase ( _lowercase , _lowercase ) -> List[Any]:
UpperCAmelCase : int = SEWConfig()
if is_finetuned:
UpperCAmelCase : Union[str, Any] = model.wav_encoder.wav_model.cfg
else:
UpperCAmelCase : Tuple = model.cfg
UpperCAmelCase : Tuple = fs_config.conv_bias
UpperCAmelCase : Dict = eval(fs_config.conv_feature_layers )
UpperCAmelCase : Any = [x[0] for x in conv_layers]
UpperCAmelCase : List[Any] = [x[1] for x in conv_layers]
UpperCAmelCase : Union[str, Any] = [x[2] for x in conv_layers]
UpperCAmelCase : Tuple = """gelu"""
UpperCAmelCase : Optional[Any] = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
UpperCAmelCase : List[str] = 0.0
UpperCAmelCase : List[Any] = fs_config.activation_fn.name
UpperCAmelCase : Optional[Any] = fs_config.encoder_embed_dim
UpperCAmelCase : Optional[Any] = 0.02
UpperCAmelCase : str = fs_config.encoder_ffn_embed_dim
UpperCAmelCase : Union[str, Any] = 1e-5
UpperCAmelCase : Union[str, Any] = fs_config.encoder_layerdrop
UpperCAmelCase : Union[str, Any] = fs_config.encoder_attention_heads
UpperCAmelCase : str = fs_config.conv_pos_groups
UpperCAmelCase : Dict = fs_config.conv_pos
UpperCAmelCase : Optional[Any] = len(_lowercase )
UpperCAmelCase : List[str] = fs_config.encoder_layers
UpperCAmelCase : Tuple = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
UpperCAmelCase : List[Any] = model.cfg
UpperCAmelCase : int = fs_config.final_dropout
UpperCAmelCase : Union[str, Any] = fs_config.layerdrop
UpperCAmelCase : Any = fs_config.activation_dropout
UpperCAmelCase : List[str] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
UpperCAmelCase : Tuple = fs_config.attention_dropout
UpperCAmelCase : Optional[int] = fs_config.dropout_input
UpperCAmelCase : Any = fs_config.dropout
UpperCAmelCase : Tuple = fs_config.mask_channel_length
UpperCAmelCase : int = fs_config.mask_channel_prob
UpperCAmelCase : Union[str, Any] = fs_config.mask_length
UpperCAmelCase : str = fs_config.mask_prob
UpperCAmelCase : Any = """Wav2Vec2FeatureExtractor"""
UpperCAmelCase : Any = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=True ) -> Dict:
if is_finetuned:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
UpperCAmelCase : str = SEWConfig.from_pretrained(_lowercase )
else:
UpperCAmelCase : str = convert_config(model[0] , _lowercase )
UpperCAmelCase : Optional[int] = model[0].eval()
UpperCAmelCase : List[str] = True if config.feat_extract_norm == """layer""" else False
UpperCAmelCase : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=_lowercase , return_attention_mask=_lowercase , )
if is_finetuned:
if dict_path:
UpperCAmelCase : Optional[Any] = Dictionary.load(_lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase : List[Any] = target_dict.pad_index
UpperCAmelCase : int = target_dict.bos_index
UpperCAmelCase : Dict = target_dict.pad_index
UpperCAmelCase : List[str] = target_dict.bos_index
UpperCAmelCase : Tuple = target_dict.eos_index
UpperCAmelCase : Optional[int] = len(target_dict.symbols )
UpperCAmelCase : int = os.path.join(_lowercase , """vocab.json""" )
if not os.path.isdir(_lowercase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowercase ) )
return
os.makedirs(_lowercase , exist_ok=_lowercase )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , _lowercase )
UpperCAmelCase : List[str] = WavaVecaCTCTokenizer(
_lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_lowercase , )
UpperCAmelCase : Tuple = WavaVecaProcessor(feature_extractor=_lowercase , tokenizer=_lowercase )
processor.save_pretrained(_lowercase )
UpperCAmelCase : List[Any] = SEWForCTC(_lowercase )
else:
UpperCAmelCase : List[Any] = SEWModel(_lowercase )
feature_extractor.save_pretrained(_lowercase )
recursively_load_weights(_lowercase , _lowercase , _lowercase )
hf_model.save_pretrained(_lowercase )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
a : Any = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
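# Example invocation (illustrative; the script name and all paths are placeholders):
#
#     python convert_sew_checkpoint.py \
#         --checkpoint_path /path/to/sew.pt \
#         --pytorch_dump_folder_path ./sew-hf \
#         --config_path ./config.json \
#         --is_finetuned --dict_path /path/to/dict.ltr.txt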
| 672 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
a : Union[str, Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
a : int = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
a : int = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def _lowercase( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def _lowercase( self ) -> List[Any]:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def _lowercase( self , A , A , A=None , A="uniform_average" , A=True ) -> List[Any]:
UpperCAmelCase : List[Any] = mean_squared_error(
A , A , sample_weight=A , multioutput=A , squared=A )
return {"mse": mse}
| 672 | 1 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase = 1_0_0 ) -> int:
UpperCAmelCase : List[Any] = (n * (n + 1) // 2) ** 2
UpperCAmelCase : Union[str, Any] = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
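# Worked check (illustrative): for n = 10 the square of the sum is 55 ** 2 = 3025
# and the sum of the squares is 385, so the difference is 3025 - 385 = 2640.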
if __name__ == "__main__":
print(F'''{solution() = }''')
| 672 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Any = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'fnet'
def __init__( self , A=32000 , A=768 , A=12 , A=3072 , A="gelu_new" , A=0.1 , A=512 , A=4 , A=0.0_2 , A=1e-12 , A=False , A=512 , A=3 , A=1 , A=2 , **A , ) -> int:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Dict = max_position_embeddings
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : List[Any] = type_vocab_size
UpperCAmelCase : int = layer_norm_eps
UpperCAmelCase : Optional[Any] = use_tpu_fourier_optimizations
UpperCAmelCase : List[Any] = tpu_short_seq_length
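# Illustrative usage sketch (assumption): instantiating the config with a couple of
# overrides; defaults follow the __init__ signature above.
#
#     config = FNetConfig(vocab_size=32_000, num_hidden_layers=12)
#     assert config.use_tpu_fourier_optimizations is False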
| 672 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a : int = logging.get_logger(__name__)
a : Optional[int] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
for attribute in key.split(""".""" ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
UpperCAmelCase : Any = """lm_head"""
UpperCAmelCase : Dict = getattr(_lowercase , _lowercase )
if weight_type is not None:
UpperCAmelCase : Any = getattr(_lowercase , _lowercase ).shape
else:
UpperCAmelCase : Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
UpperCAmelCase : List[Any] = value
elif weight_type == "weight_g":
UpperCAmelCase : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCAmelCase : Any = value
elif weight_type == "bias":
UpperCAmelCase : Dict = value
else:
UpperCAmelCase : str = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
UpperCAmelCase : str = []
UpperCAmelCase : Optional[Any] = fairseq_model.state_dict()
UpperCAmelCase : Any = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == """group""" , )
UpperCAmelCase : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase : str = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
UpperCAmelCase : Optional[Any] = True
if "*" in mapped_key:
UpperCAmelCase : List[str] = name.split(_lowercase )[0].split(""".""" )[-2]
UpperCAmelCase : Optional[Any] = mapped_key.replace("""*""" , _lowercase )
if "weight_g" in name:
UpperCAmelCase : Tuple = """weight_g"""
elif "weight_v" in name:
UpperCAmelCase : int = """weight_v"""
elif "bias" in name:
UpperCAmelCase : Optional[Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase : str = """weight"""
else:
UpperCAmelCase : Dict = None
set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
continue
if not is_used:
unused_weights.append(_lowercase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
UpperCAmelCase : Union[str, Any] = full_name.split("""conv_layers.""" )[-1]
UpperCAmelCase : List[Any] = name.split(""".""" )
UpperCAmelCase : Optional[int] = int(items[0] )
UpperCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
UpperCAmelCase : Union[str, Any] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
UpperCAmelCase : int = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
UpperCAmelCase : List[str] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
UpperCAmelCase : List[str] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowercase )
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
if config_path is not None:
UpperCAmelCase : Tuple = UniSpeechConfig.from_pretrained(_lowercase )
else:
UpperCAmelCase : Any = UniSpeechConfig()
if is_finetuned:
if dict_path:
UpperCAmelCase : Optional[Any] = Dictionary.load_from_json(_lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase : Union[str, Any] = target_dict.pad_index
UpperCAmelCase : List[Any] = target_dict.bos_index
UpperCAmelCase : Tuple = target_dict.eos_index
UpperCAmelCase : Dict = len(target_dict.symbols )
UpperCAmelCase : str = os.path.join(_lowercase , """vocab.json""" )
if not os.path.isdir(_lowercase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowercase ) )
return
os.makedirs(_lowercase , exist_ok=_lowercase )
UpperCAmelCase : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase : List[Any] = 4_2
UpperCAmelCase : Dict = 4_3
with open(_lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(_lowercase , _lowercase )
        UpperCAmelCase : str = Wav2Vec2PhonemeCTCTokenizer(
_lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_lowercase , )
UpperCAmelCase : Optional[int] = True if config.feat_extract_norm == """layer""" else False
        UpperCAmelCase : List[str] = Wav2Vec2FeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=_lowercase , return_attention_mask=_lowercase , )
        UpperCAmelCase : Dict = Wav2Vec2Processor(feature_extractor=_lowercase , tokenizer=_lowercase )
processor.save_pretrained(_lowercase )
UpperCAmelCase : List[Any] = UniSpeechForCTC(_lowercase )
else:
UpperCAmelCase : Union[str, Any] = UniSpeechForPreTraining(_lowercase )
if is_finetuned:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} )
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
UpperCAmelCase : str = model[0].eval()
recursively_load_weights(_lowercase , _lowercase , _lowercase )
hf_unispeech.save_pretrained(_lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
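# Example invocation (the script name and all paths are illustrative):
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path ./unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --dict_path ./dict.ltr.txt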
| 672 |
'''simple docstring'''
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
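# Worked trace for the expression above: the innermost groups close first, so
# (4 * 2) -> 8 and (2 + 3) -> 5 are reduced and pushed, then (8 * 5) -> 40,
# and finally (5 + 40) -> 45, which RULE 5 returns.
#
#   print(dijkstras_two_stack_algorithm("(3 + (2 * 4))"))  # 11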
| 672 | 1 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
for attribute in key.split(""".""" ):
UpperCAmelCase : Optional[Any] = getattr(_lowercase , _lowercase )
if weight_type is not None:
UpperCAmelCase : Any = getattr(_lowercase , _lowercase ).shape
else:
UpperCAmelCase : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
UpperCAmelCase : Dict = value
elif weight_type == "weight_g":
UpperCAmelCase : Tuple = value
elif weight_type == "weight_v":
UpperCAmelCase : Dict = value
elif weight_type == "bias":
UpperCAmelCase : int = value
else:
UpperCAmelCase : Tuple = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model):
UpperCAmelCase : int = []
UpperCAmelCase : Any = fairseq_model.state_dict()
UpperCAmelCase : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
_lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == """group""" , )
UpperCAmelCase : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
UpperCAmelCase : Dict = True
if "*" in mapped_key:
UpperCAmelCase : Optional[Any] = name.split(_lowercase )[0].split(""".""" )[-2]
UpperCAmelCase : Union[str, Any] = mapped_key.replace("""*""" , _lowercase )
if "weight_g" in name:
UpperCAmelCase : Any = """weight_g"""
elif "weight_v" in name:
UpperCAmelCase : Tuple = """weight_v"""
elif "bias" in name and "relative_attention_bias" not in name:
UpperCAmelCase : Optional[Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase : Union[str, Any] = """weight"""
else:
UpperCAmelCase : Optional[Any] = None
set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
continue
if not is_used:
unused_weights.append(_lowercase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
UpperCAmelCase : Dict = full_name.split("""conv_layers.""" )[-1]
UpperCAmelCase : Any = name.split(""".""" )
UpperCAmelCase : Optional[int] = int(items[0] )
UpperCAmelCase : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
UpperCAmelCase : int = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
UpperCAmelCase : Dict = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
UpperCAmelCase : str = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
UpperCAmelCase : Optional[int] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowercase )
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
# load the pre-trained checkpoints
UpperCAmelCase : Optional[Any] = torch.load(_lowercase )
UpperCAmelCase : List[str] = WavLMConfigOrig(checkpoint["""cfg"""] )
UpperCAmelCase : Union[str, Any] = WavLMOrig(_lowercase )
model.load_state_dict(checkpoint["""model"""] )
model.eval()
if config_path is not None:
UpperCAmelCase : Union[str, Any] = WavLMConfig.from_pretrained(_lowercase )
else:
UpperCAmelCase : List[Any] = WavLMConfig()
UpperCAmelCase : Optional[Any] = WavLMModel(_lowercase )
recursively_load_weights(_lowercase , _lowercase )
hf_wavlm.save_pretrained(_lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
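# Programmatic equivalent of the CLI above (paths are placeholders):
#
#   convert_wavlm_checkpoint("WavLM-Base.pt", "./wavlm-hf", config_path=None)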
| 672 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
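# Note on the q/k/v split above: conditional DETR uses a hidden size of 256, so
# the fused `in_proj_weight` has shape (3 * 256, 256); rows [0:256], [256:512]
# and [512:768] hold the query, key and value projections, in that order.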
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
UpperCAmelCase : str = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
UpperCAmelCase : List[Any] = """resnet101"""
if "dc5" in model_name:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : List[Any] = """panoptic""" in model_name
if is_panoptic:
UpperCAmelCase : Union[str, Any] = 2_5_0
else:
UpperCAmelCase : int = 9_1
UpperCAmelCase : Tuple = """huggingface/label-files"""
UpperCAmelCase : List[Any] = """coco-detection-id2label.json"""
UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) )
    UpperCAmelCase : Dict = {int(k): v for k, v in idalabel.items()}
UpperCAmelCase : Optional[Any] = idalabel
UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
# load image processor
UpperCAmelCase : List[str] = """coco_panoptic""" if is_panoptic else """coco_detection"""
UpperCAmelCase : List[Any] = ConditionalDetrImageProcessor(format=_lowercase )
# prepare image
UpperCAmelCase : Union[str, Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=_lowercase , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = encoding["""pixel_values"""]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
UpperCAmelCase : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , _lowercase , pretrained=_lowercase ).eval()
UpperCAmelCase : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
UpperCAmelCase : List[Any] = """conditional_detr.""" + src
rename_key(_lowercase , _lowercase , _lowercase )
UpperCAmelCase : List[Any] = rename_backbone_keys(_lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowercase , is_panoptic=_lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase : int = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
UpperCAmelCase : Union[str, Any] = state_dict.pop(_lowercase )
UpperCAmelCase : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCAmelCase : Any = state_dict.pop(_lowercase )
UpperCAmelCase : Optional[Any] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
UpperCAmelCase : List[Any] = state_dict.pop(_lowercase )
UpperCAmelCase : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
UpperCAmelCase : Optional[int] = state_dict.pop(_lowercase )
UpperCAmelCase : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase : List[Any] = ConditionalDetrForSegmentation(_lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
model.push_to_hub(repo_id=_lowercase , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
UpperCAmelCase : Union[str, Any] = conditional_detr(_lowercase )
UpperCAmelCase : int = model(_lowercase )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
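# Example invocation (the script name and output directory are illustrative):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional-detr-resnet50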
| 672 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCamelCase_ ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__(
        self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True,
        crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None,
        image_std=None, do_convert_rgb=True, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None,
        do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None,
        do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
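# Minimal usage sketch (the class name is this file's placeholder for a
# CLIP-style image processor; `pil_image` stands for any RGB PIL image):
#
#   processor = UpperCamelCase_()
#   batch = processor(images=pil_image, return_tensors="pt")
#   print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])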
| 672 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
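# Usage sketch (assuming this is the VQ-Diffusion pipeline package of the
# `diffusers` library, as the relative imports suggest):
#
#   from diffusers import VQDiffusionPipeline
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")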
| 672 | 1 |
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
UpperCAmelCase : List[Any] = os.path.abspath(_lowercase )
logger.info(F'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
UpperCAmelCase : Optional[Any] = tf.train.list_variables(_lowercase )
UpperCAmelCase : Any = []
UpperCAmelCase : List[str] = []
UpperCAmelCase : Any = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
UpperCAmelCase : Optional[Any] = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(F'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
UpperCAmelCase : List[str] = name[1:]
# figure out how many levels deep the name is
UpperCAmelCase : Any = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(_lowercase )
# read data
UpperCAmelCase : Dict = tf.train.load_variable(_lowercase , _lowercase )
names.append("""/""".join(_lowercase ) )
arrays.append(_lowercase )
logger.info(F'''Read a total of {len(_lowercase ):,} layers''' )
# Sanity check
if len(set(_lowercase ) ) != 1:
raise ValueError(F'''Found layer names with different depths (layer depth {list(set(_lowercase ) )})''' )
UpperCAmelCase : Tuple = list(set(_lowercase ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(_lowercase , _lowercase ):
UpperCAmelCase : Optional[Any] = full_name.split("""/""" )
UpperCAmelCase : Optional[Any] = model
UpperCAmelCase : str = []
for i, m_name in enumerate(_lowercase ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
UpperCAmelCase : Union[str, Any] = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
UpperCAmelCase : int = getattr(_lowercase , """embeddings""" )
UpperCAmelCase : Optional[int] = getattr(_lowercase , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
UpperCAmelCase : Any = getattr(_lowercase , """encoder""" )
UpperCAmelCase : Union[str, Any] = getattr(_lowercase , """layer""" )
UpperCAmelCase : str = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
UpperCAmelCase : int = getattr(_lowercase , """pooler""" )
UpperCAmelCase : Optional[Any] = getattr(_lowercase , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
UpperCAmelCase : Any = getattr(_lowercase , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
UpperCAmelCase : Tuple = getattr(_lowercase , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
UpperCAmelCase : Dict = getattr(_lowercase , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
UpperCAmelCase : Optional[int] = getattr(_lowercase , """token_type_embeddings""" )
else:
raise ValueError(F'''Unknown embedding layer with name {full_name}''' )
trace.append("""weight""" )
UpperCAmelCase : str = getattr(_lowercase , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
UpperCAmelCase : Optional[Any] = getattr(_lowercase , """attention""" )
UpperCAmelCase : Optional[int] = getattr(_lowercase , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
UpperCAmelCase : List[str] = getattr(_lowercase , """attention""" )
UpperCAmelCase : Any = getattr(_lowercase , """output""" )
UpperCAmelCase : List[Any] = getattr(_lowercase , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
UpperCAmelCase : Optional[Any] = getattr(_lowercase , """attention""" )
UpperCAmelCase : List[str] = getattr(_lowercase , """output""" )
UpperCAmelCase : Any = getattr(_lowercase , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
UpperCAmelCase : Dict = getattr(_lowercase , """output""" )
UpperCAmelCase : Any = getattr(_lowercase , """dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
UpperCAmelCase : List[str] = getattr(_lowercase , """output""" )
UpperCAmelCase : Union[str, Any] = getattr(_lowercase , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
UpperCAmelCase : List[str] = getattr(_lowercase , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
UpperCAmelCase : Optional[Any] = getattr(_lowercase , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
UpperCAmelCase : Any = getattr(_lowercase , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
UpperCAmelCase : Optional[int] = getattr(_lowercase , """intermediate""" )
UpperCAmelCase : Any = getattr(_lowercase , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
UpperCAmelCase : Union[str, Any] = getattr(_lowercase , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
UpperCAmelCase : Union[str, Any] = getattr(_lowercase , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
UpperCAmelCase : Optional[int] = getattr(_lowercase , """weight""" )
else:
logger.warning(F'''Ignored {m_name}''' )
# for certain layers reshape is necessary
UpperCAmelCase : Optional[int] = """.""".join(_lowercase )
if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , _lowercase ) or re.match(
R"""(\S+)\.attention\.output\.dense\.weight""" , _lowercase ):
UpperCAmelCase : Tuple = array.reshape(pointer.data.shape )
if "kernel" in full_name:
UpperCAmelCase : Union[str, Any] = array.transpose()
if pointer.shape == array.shape:
UpperCAmelCase : Tuple = torch.from_numpy(_lowercase )
else:
raise ValueError(
F'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
F''' {array.shape}''' )
logger.info(F'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
# Instantiate model
logger.info(F'''Loading model based on config from {config_path}...''' )
UpperCAmelCase : str = BertConfig.from_json_file(_lowercase )
UpperCAmelCase : List[Any] = BertModel(_lowercase )
# Load weights from checkpoint
logger.info(F'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(_lowercase , _lowercase , _lowercase )
# Save pytorch-model
logger.info(F'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict() , _lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
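# Programmatic equivalent of the CLI above (paths are placeholders):
#
#   convert_tfa_checkpoint_to_pytorch("tf2_model.ckpt", "bert_config.json", "pytorch_model.bin")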
| 672 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
a : int = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
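# Sketch of the special-token layout the helpers above produce (CamemBERT/
# XLM-RoBERTa style):
#
#   single sequence:   <s> A </s>
#   pair of sequences: <s> A </s></s> B </s>
#
# `create_token_type_ids_from_sequences` returns all zeros in both cases.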
| 672 | 1 |
'''simple docstring'''
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
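# The same negation can be written without explicit loops (a sketch, assuming a
# standard uint8 numpy image as returned by `imread`):
#
#   img = 255 - img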
| 672 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    # Heun's method (modified Euler): predict with an explicit Euler step,
    # then correct with the trapezoidal rule.
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
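# Usage sketch: for y' = -2*x*y with y(0) = 1 the exact solution is
# y = exp(-x**2), so the last value should be close to exp(-1) ~ 0.368:
#
#   y = euler_modified(lambda x, y: -2 * x * y, 1.0, 0.0, 0.2, 1.0)
#   print(y[-1])  # ~ 0.37, within the method's O(h**2) error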
| 672 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class UpperCamelCase_ ( unittest.TestCase ):
    def setUp(self) -> None:
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role,
            image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count,
            instance_type=self.instance_type, debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
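# These tests are gated behind the TEST_SAGEMAKER environment variable (see the
# skipif above); a typical invocation (test path illustrative):
#
#   TEST_SAGEMAKER=True python -m pytest tests/sagemaker -s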
| 672 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
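# Sketch of the premise/hypothesis pairs the handler builds for NLI-style
# zero-shot scoring:
#
#   handler = ZeroShotClassificationArgumentHandler()
#   pairs, seqs = handler("Who are you voting for?", ["politics", "sports"], "This example is {}.")
#   # pairs == [["Who are you voting for?", "This example is politics."],
#   #           ["Who are you voting for?", "This example is sports."]]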
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )
    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize( self , sequence_pairs , padding=True , add_special_tokens=True , truncation=TruncationStrategy.ONLY_FIRST , **kwargs ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                """Tokenizer does not support the padding necessary for zero-shot classification; attempting to use """
                """`pad_token=eos_token`""" )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=truncation , )
        except Exception as e:
            if "too short" in str(e ):
                # The tokenizer may complain when asked to truncate to a length
                # the input never reaches. In that case, simply retry without
                # truncation; inspecting the exception message is the only
                # practical way to detect this situation.
                inputs = self.tokenizer(
                    sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs
    def _sanitize_parameters( self , **kwargs ):
        if kwargs.get("""multi_class""" , None ) is not None:
            kwargs["""multi_label"""] = kwargs["""multi_class"""]
            logger.warning(
                """The `multi_class` argument has been deprecated and renamed to `multi_label`. """
                """`multi_class` will be removed in a future version of Transformers.""" )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["""candidate_labels"""] = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
        if "hypothesis_template" in kwargs:
            preprocess_params["""hypothesis_template"""] = kwargs["""hypothesis_template"""]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["""multi_label"""] = kwargs["""multi_label"""]
        return preprocess_params, {}, postprocess_params
    def __call__( self , sequences , *args , **kwargs , ):
        if len(args ) == 0:
            pass
        elif len(args ) == 1 and "candidate_labels" not in kwargs:
            kwargs["""candidate_labels"""] = args[0]
        else:
            raise ValueError(f'''Unable to understand extra arguments {args}''' )
        return super().__call__(sequences , **kwargs )
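    # Typical usage of this pipeline, as a sketch ("zero-shot-classification" is
    # the registered task name):
    #     classifier = pipeline("zero-shot-classification")
    #     classifier("Who are you voting for?", candidate_labels=["politics", "sports"])
    # which returns a dict with the "sequence", the ranked "labels" and their "scores".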
    def preprocess( self , inputs , candidate_labels=None , hypothesis_template="This example is {}." ):
        sequence_pairs , sequences = self._args_parser(inputs , candidate_labels , hypothesis_template )
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels , sequence_pairs ) ):
            model_input = self._parse_and_tokenize([sequence_pair] )
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels ) - 1,
                **model_input,
            }
    def _forward( self , inputs ):
        candidate_label = inputs["""candidate_label"""]
        sequence = inputs["""sequence"""]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs )
        model_outputs = {
            """candidate_label""": candidate_label,
            """sequence""": sequence,
            """is_last""": inputs["""is_last"""],
            **outputs,
        }
        return model_outputs
    def postprocess( self , model_outputs , multi_label=False ):
        candidate_labels = [outputs["""candidate_label"""] for outputs in model_outputs]
        sequences = [outputs["""sequence"""] for outputs in model_outputs]
        logits = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
        N = logits.shape[0]
        n = len(candidate_labels )
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(candidate_labels ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits ) / np.exp(entail_contr_logits ).sum(-1 , keepdims=True )
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits ) / np.exp(entail_logits ).sum(-1 , keepdims=True )
        top_inds = list(reversed(scores[0].argsort() ) )
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
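    # Worked sketch of the scoring above (hypothetical numbers): with one sequence,
    # two candidate labels and entailment_id == 2, logits has shape (2, 3) and
    # reshape((1, 2, -1)) groups it per sequence. In single-label mode the
    # entailment column, e.g. np.array([[1.2, 3.4]]), is softmaxed across labels:
    #     np.exp(x) / np.exp(x).sum(-1, keepdims=True)  ->  [[0.0998, 0.9002]]
    # In multi-label mode each label is instead softmaxed independently against
    # its own contradiction logit.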
| 672 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def __lowerCamelCase ( ode_func , ya , xa , step_size , x_end ) -> np.array:
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # predictor: one explicit Euler step
        y_pred = y[k] + step_size * ode_func(x , y[k] )
        # corrector: trapezoidal average of the slopes at both interval ends (Heun's method)
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
        )
        x += step_size
    return y
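# Minimal usage sketch (an assumed example, not from the original file): integrate
# dy/dx = y from x=0 to x=1 with step 0.01; Heun's method closely reproduces the
# exact solution e**x:
#     y = __lowerCamelCase(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#     y[-1]  # approximately 2.71828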
if __name__ == "__main__":
import doctest
doctest.testmod()
| 672 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> List[str]:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = """</s>"""
UpperCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(A ) , 1103 )
def _lowercase( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Union[str, Any] = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
UpperCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : List[Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCAmelCase : Any = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
UpperCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : str = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
UpperCAmelCase : List[Any] = """To ensure a smooth flow of bank resolutions."""
UpperCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _lowercase( self ) -> Any:
UpperCAmelCase : int = ["""This is going to be way too long.""" * 150, """short example"""]
UpperCAmelCase : Optional[int] = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def _lowercase( self ) -> List[str]:
# fmt: off
UpperCAmelCase : List[str] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowercase( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> str:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : str = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : str = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ["""This is going to be way too long.""" * 1000, """short example"""]
UpperCAmelCase : Any = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : int = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
UpperCAmelCase : Optional[Any] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 672 | 1 |
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode :
    def __init__( self , start , end , val , left=None , right=None ):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
    def __repr__( self ) -> str:
        return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class SegmentTree :
    def __init__( self , collection , function ):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 , len(collection ) - 1 )
    def update( self , i , val ):
        self._update_tree(self.root , i , val )
    def query_range( self , i , j ):
        return self._query_range(self.root , i , j )
    def _build_tree( self , start , end ):
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start , mid )
        right = self._build_tree(mid + 1 , end )
        return SegmentTreeNode(start , end , self.fn(left.val , right.val ) , left , right )
    def _update_tree( self , node , i , val ):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val )
        else:
            self._update_tree(node.right , i , val )
        node.val = self.fn(node.left.val , node.right.val )
    def _query_range( self , node , i , j ):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left , i , j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left , i , node.mid ) , self._query_range(node.right , node.mid + 1 , j ) , )
        else:
            # range in right child tree
            return self._query_range(node.right , i , j )
    def traverse( self ):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
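# Complexity sketch for the class above: _build_tree visits each node once (O(n));
# update and query_range follow root-to-leaf paths, so both run in O(log n) for a
# balanced tree over n leaves; traverse yields all ~2n - 1 nodes in BFS order.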
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 5_0)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 672 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=32 , A=3 , A=4 , A=[10, 20, 30, 40] , A=[2, 2, 3, 2] , A=True , A=True , A=37 , A="gelu" , A=10 , A=0.0_2 , A=["stage2", "stage3", "stage4"] , A=[2, 3, 4] , A=None , ) -> int:
UpperCAmelCase : str = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Dict = image_size
UpperCAmelCase : Tuple = num_channels
UpperCAmelCase : Union[str, Any] = num_stages
UpperCAmelCase : Any = hidden_sizes
UpperCAmelCase : str = depths
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : List[str] = num_labels
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Optional[Any] = out_features
UpperCAmelCase : List[str] = out_indices
UpperCAmelCase : Any = scope
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def _lowercase( self ) -> Optional[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=A , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowercase( self , A , A , A ) -> Optional[Any]:
UpperCAmelCase : int = ConvNextVaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : List[str] = ConvNextVaForImageClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A ) -> Any:
UpperCAmelCase : Optional[Any] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Any = model(A )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase : Any = None
UpperCAmelCase : Optional[int] = ConvNextVaBackbone(config=A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Dict = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = config_and_inputs
UpperCAmelCase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs
UpperCAmelCase : List[str] = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Dict = ConvNextVaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 )
def _lowercase( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase( self ) -> List[str]:
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def _lowercase( self ) -> Dict:
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def _lowercase( self ) -> Any:
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def _lowercase( self ) -> int:
pass
def _lowercase( self ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : Optional[int] = True
if model_class.__name__ in [
*get_values(A ),
*get_values(A ),
]:
continue
UpperCAmelCase : Any = model_class(A )
model.to(A )
model.train()
UpperCAmelCase : List[str] = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : List[str] = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_with_labels()
UpperCAmelCase : List[str] = False
UpperCAmelCase : int = True
if (
model_class.__name__
in [*get_values(A ), *get_values(A )]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase : Dict = model_class(A )
model.to(A )
model.gradient_checkpointing_enable()
model.train()
UpperCAmelCase : Any = self._prepare_for_class(A , A , return_labels=A )
UpperCAmelCase : Any = model(**A ).loss
loss.backward()
def _lowercase( self ) -> Tuple:
UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : int = model_class(A )
UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Tuple = [*signature.parameters.keys()]
UpperCAmelCase : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> List[str]:
def check_hidden_states_output(A , A , A ):
UpperCAmelCase : Optional[Any] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(A , A ) )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : str = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : int = True
check_hidden_states_output(A , A , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def _lowercase( self ) -> Any:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Tuple = ConvNextVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def __lowerCamelCase ( ) -> Optional[int]:
UpperCAmelCase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def _lowercase( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Any = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(A )
UpperCAmelCase : List[Any] = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : Tuple = preprocessor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**A )
# verify the logits
UpperCAmelCase : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A )
UpperCAmelCase : Dict = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
| 672 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
a : Dict = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig( PretrainedConfig ):
    model_type = 'realm'
    def __init__( self , vocab_size=30522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13353718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 672 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
a : str = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
a : Dict = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
a : Optional[int] = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 672 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
a : Dict = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class DetaConfig( PretrainedConfig ):
    model_type = 'deta'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
def __init__( self , A=None , A=900 , A=2048 , A=6 , A=2048 , A=8 , A=6 , A=1024 , A=8 , A=0.0 , A=True , A="relu" , A=256 , A=0.1 , A=0.0 , A=0.0 , A=0.0_2 , A=1.0 , A=True , A=False , A="sine" , A=5 , A=4 , A=4 , A=True , A=300 , A=True , A=True , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , A=0.2_5 , **A , ) -> List[Any]:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
UpperCAmelCase : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(A , A ):
UpperCAmelCase : Dict = backbone_config.pop("""model_type""" )
UpperCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase : int = config_class.from_dict(A )
UpperCAmelCase : Optional[Any] = backbone_config
UpperCAmelCase : Union[str, Any] = num_queries
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : List[Any] = d_model
UpperCAmelCase : List[str] = encoder_ffn_dim
UpperCAmelCase : Union[str, Any] = encoder_layers
UpperCAmelCase : int = encoder_attention_heads
UpperCAmelCase : Dict = decoder_ffn_dim
UpperCAmelCase : Tuple = decoder_layers
UpperCAmelCase : Optional[int] = decoder_attention_heads
UpperCAmelCase : str = dropout
UpperCAmelCase : Any = attention_dropout
UpperCAmelCase : Optional[int] = activation_dropout
UpperCAmelCase : List[Any] = activation_function
UpperCAmelCase : List[Any] = init_std
UpperCAmelCase : Optional[int] = init_xavier_std
UpperCAmelCase : str = encoder_layerdrop
UpperCAmelCase : Any = auxiliary_loss
UpperCAmelCase : Optional[int] = position_embedding_type
# deformable attributes
UpperCAmelCase : Dict = num_feature_levels
UpperCAmelCase : List[Any] = encoder_n_points
UpperCAmelCase : Optional[Any] = decoder_n_points
UpperCAmelCase : Union[str, Any] = two_stage
UpperCAmelCase : str = two_stage_num_proposals
UpperCAmelCase : Optional[Any] = with_box_refine
UpperCAmelCase : int = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
UpperCAmelCase : int = class_cost
UpperCAmelCase : Optional[Any] = bbox_cost
UpperCAmelCase : int = giou_cost
# Loss coefficients
UpperCAmelCase : Optional[int] = mask_loss_coefficient
UpperCAmelCase : Tuple = dice_loss_coefficient
UpperCAmelCase : Tuple = bbox_loss_coefficient
UpperCAmelCase : str = giou_loss_coefficient
UpperCAmelCase : List[Any] = eos_coefficient
UpperCAmelCase : Dict = focal_alpha
super().__init__(is_encoder_decoder=A , **A )
@property
def _lowercase( self ) -> int:
return self.encoder_attention_heads
@property
def _lowercase( self ) -> int:
return self.d_model
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ )
UpperCAmelCase : List[str] = self.backbone_config.to_dict()
UpperCAmelCase : Optional[Any] = self.__class__.model_type
return output
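        # Serialization note: the override above inlines the nested backbone config
        # as a plain dict and records this class's model_type alongside it, so a
        # save_pretrained/from_pretrained round-trip preserves the composite
        # configuration.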
| 672 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string( stringa , stringb ) -> str | Literal[False]:
    lista = list(stringa )
    listb = list(stringb )
    count = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = """_"""
    if count > 1:
        return False
    else:
        return "".join(lista )
def check( binary ) -> list[str]:
    pi = []
    while True:
        checka = ["""$"""] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1 , len(binary ) ):
                k = compare_string(binary[i] , binary[j] )
                if k is False:
                    checka[i] = """*"""
                    checka[j] = """*"""
                    temp.append("""X""" )
        for i in range(len(binary ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary( no_of_variable , minterms ) -> list[str]:
    temp = []
    for minterm in minterms:
        string = """"""
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def is_for_table( stringa , stringb , count ) -> bool:
    lista = list(stringa )
    listb = list(stringb )
    count_n = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def selection( chart , prime_implicants ) -> list[str]:
    temp = []
    select = [0] * len(chart )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart( prime_implicants , binary ) -> list[list[int]]:
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count("""_""" )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("""Enter the no. of variables\n""" ) )
    minterms = [
        float(x )
        for x in input(
            """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
    ]
    binary = decimal_to_binary(no_of_variable , minterms )
    prime_implicants = check(binary )
    print("""Prime Implicants are:""" )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants , binary )
    essential_prime_implicants = selection(chart , prime_implicants )
    print("""Essential Prime Implicants are:""" )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
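    # The flow above, summarized: decimal_to_binary() turns each minterm into a
    # fixed-width binary string, check() repeatedly merges strings differing in one
    # position (marking it "_") until only prime implicants remain,
    # prime_implicant_chart() records which minterms each implicant covers, and
    # selection() extracts an essential cover from that chart.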
| 672 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mctct"""] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def next_number( number ) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
        number //= 1_0_0_0_0_0
    return sum_of_digits_squared
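# Example: next_number(44) is DIGITS_SQUARED[44] = 4**2 + 4**2 = 32 in one lookup,
# while next_number(123456) sums DIGITS_SQUARED[23456] + DIGITS_SQUARED[1] = 90 + 1 = 91
# across two 5-digit blocks.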
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 1_0_0_0_0_0_0_0
CHAINS[0] = True
CHAINS[57] = False
def chain( number ) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1] # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 1_0_0_0_0_0_0_0:
        CHAINS[number - 1] = number_chain
        number *= 1_0
    return number_chain
def solution( number = 1_0_0_0_0_0_0_0 ) -> int:
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
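# Example chains (from the problem statement): 44 -> 32 -> 13 -> 10 -> 1 arrives
# at 1, so chain(44) is True; 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58
# -> 89 loops back to 89, so chain(85) is False and it is counted by solution().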
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
| 672 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
a : List[str] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode( ):
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
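# The table above covers all 256 byte values: printable bytes map to themselves,
# while the remaining bytes are shifted to unused code points starting at 256,
# giving every byte a visible, reversible unicode stand-in for the BPE below.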
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
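# Example: get_pairs(("h", "e", "l", "l", "o")) returns the adjacent symbol pairs
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}; the bpe() method below
# repeatedly merges the lowest-ranked of these pairs until none remain in the
# merge table.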
class LEDTokenizer( PreTrainedTokenizer ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
def __init__( self , A , A , A="replace" , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=False , **A , ) -> Optional[int]:
UpperCAmelCase : Any = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else bos_token
UpperCAmelCase : Optional[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else eos_token
UpperCAmelCase : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else sep_token
UpperCAmelCase : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else cls_token
UpperCAmelCase : List[str] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
errors=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , add_prefix_space=A , **A , )
with open(A , encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase : Union[str, Any] = json.load(A )
UpperCAmelCase : str = {v: k for k, v in self.encoder.items()}
UpperCAmelCase : Optional[int] = errors # how to handle errors in decoding
UpperCAmelCase : List[str] = bytes_to_unicode()
UpperCAmelCase : Any = {v: k for k, v in self.byte_encoder.items()}
with open(A , encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase : Dict = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase : Optional[int] = dict(zip(A , range(len(A ) ) ) )
UpperCAmelCase : Union[str, Any] = {}
UpperCAmelCase : str = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _lowercase( self ) -> int:
return len(self.encoder )
def _lowercase( self ) -> Union[str, Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase( self , A ) -> int:
if token in self.cache:
return self.cache[token]
UpperCAmelCase : List[Any] = tuple(A )
UpperCAmelCase : List[Any] = get_pairs(A )
if not pairs:
return token
while True:
UpperCAmelCase : Dict = min(A , key=lambda A : self.bpe_ranks.get(A , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase , UpperCAmelCase : int = bigram
UpperCAmelCase : Tuple = []
UpperCAmelCase : Dict = 0
while i < len(A ):
try:
UpperCAmelCase : List[Any] = word.index(A , A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase : Union[str, Any] = j
if word[i] == first and i < len(A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase : Dict = tuple(A )
UpperCAmelCase : Any = new_word
if len(A ) == 1:
break
else:
UpperCAmelCase : Optional[int] = get_pairs(A )
UpperCAmelCase : Optional[int] = """ """.join(A )
UpperCAmelCase : str = word
return word
def _lowercase( self , A ) -> List[Any]:
UpperCAmelCase : int = []
for token in re.findall(self.pat , A ):
UpperCAmelCase : int = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A ).split(""" """ ) )
return bpe_tokens
def _lowercase( self , A ) -> Dict:
return self.encoder.get(A , self.encoder.get(self.unk_token ) )
def _lowercase( self , A ) -> Dict:
return self.decoder.get(A )
def _lowercase( self , A ) -> Any:
UpperCAmelCase : Union[str, Any] = """""".join(A )
UpperCAmelCase : Any = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def _lowercase( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : Dict = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase : Dict = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A , ensure_ascii=A ) + """\n""" )
UpperCAmelCase : str = 0
with open(A , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase : Optional[Any] = token_index
writer.write(""" """.join(A ) + """\n""" )
index += 1
return vocab_file, merge_file
def _lowercase( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Tuple = [self.cls_token_id]
UpperCAmelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : List[Any] = [self.sep_token_id]
UpperCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase( self , A , A=False , **A ) -> str:
UpperCAmelCase : Tuple = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A ) > 0 and not text[0].isspace()):
UpperCAmelCase : List[str] = """ """ + text
return (text, kwargs)
def _lowercase( self , A , A = None , A = PaddingStrategy.DO_NOT_PAD , A = None , A = None , ) -> dict:
UpperCAmelCase : Union[str, Any] = super()._pad(
encoded_inputs=A , max_length=A , padding_strategy=A , pad_to_multiple_of=A , return_attention_mask=A , )
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase : List[Any] = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase : Optional[Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCAmelCase : Dict = len(encoded_inputs["""global_attention_mask"""] ) != len(A )
if needs_to_be_padded:
UpperCAmelCase : Dict = len(A ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase : int = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase : List[Any] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_bert"""] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_bert"""] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_bert"""] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
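# Note (added): the import structure above is consumed lazily. A sketch of what a
# consumer sees, assuming the standard `_LazyModule` behaviour of resolving module
# attributes on first access:
#
#   import transformers.models.bert as bert   # cheap: nothing heavy is imported yet
#   bert.BertConfig                            # pulls in configuration_bert only
#   bert.BertModel                             # triggers the full modeling_bert import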
| 672 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_graphormer"""] = [
"""GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GraphormerForGraphClassification""",
"""GraphormerModel""",
"""GraphormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperature: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperature
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # until we find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("""Iterations""")
        plt.ylabel("""Function values""")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F'''and 50 > y > - 5 found via simulated annealing: {local_max.score()}'''
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        """The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F'''{local_min.score()}'''
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        """The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F'''{local_max.score()}'''
    )
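# Numeric illustration (added, not part of the original module): the acceptance
# rule above is Metropolis-style. A worsening move with change = -5 is accepted
# with probability e ** (change / current_temp), so it is likely while the
# temperature is high and rare once it has cooled:
#
#   >>> import math
#   >>> round(math.e ** (-5 / 100), 3)   # T = 100 (early)
#   0.951
#   >>> round(math.e ** (-5 / 1), 6)     # T = 1 (late)
#   0.006738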
| 672 | 1 |
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0's are free paths whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("""------""")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
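# Design note (added): GreedyBestFirst sorts the open list purely by the heuristic
# (f_cost is just the Manhattan distance to the goal), unlike A*, which orders by
# g_cost + heuristic. Turning this solver into A* is essentially a one-line change
# in Node.__init__:
#
#   self.f_cost = self.g_cost + self.calculate_heuristic()
#
# Greedy best-first usually expands fewer nodes but may return a suboptimal path.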
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_nezha"""] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
a : int = TypeVar("""KT""")
a : int = TypeVar("""VT""")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f'''Node({self.key}: {self.value})'''

    @property
    def level(self) -> int:
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f'''SkipList(level={self.level})'''

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f'''[{node.key}]'''.ljust(label_size, """-""") + """* """ * len(forwards))
        lines.append(""" """ * label_size + """| """ * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f'''[{node.key}]'''.ljust(label_size, """-""")
                + """ """.join(str(n.key) if n.key == node.key else """|""" for n in forwards)
            )
            lines.append(""" """ * label_size + """| """ * len(forwards))
            forwards[: node.level] = node.forward

        lines.append("""None""".ljust(label_size) + """* """ * len(forwards))
        return f'''SkipList(level={self.level})\n''' + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        level = 1

        while random() < self.p and level < self.max_level:
            level += 1

        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("""Key1""", 3)
    skip_list.insert("""Key2""", 12)
    skip_list.insert("""Key3""", 41)
    skip_list.insert("""Key4""", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("""Key1""", 10)
    skip_list.insert("""Key1""", 12)

    skip_list.insert("""Key5""", 7)
    skip_list.insert("""Key7""", 10)
    skip_list.insert("""Key10""", 5)

    skip_list.insert("""Key7""", 7)
    skip_list.insert("""Key5""", 5)
    skip_list.insert("""Key10""", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("""Some key""") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("""Key2""", 20)
    assert skip_list.find("""Key2""") == 20

    skip_list.insert("""Some Key""", 10)
    skip_list.insert("""Key2""", 8)
    skip_list.insert("""V""", 13)

    assert skip_list.find("""Y""") is None
    assert skip_list.find("""Key2""") == 8
    assert skip_list.find("""Some Key""") == 10
    assert skip_list.find("""V""") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("""Some key""")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("""Key1""", 12)
    skip_list.insert("""V""", 13)
    skip_list.insert("""X""", 14)
    skip_list.insert("""Key2""", 15)

    skip_list.delete("""V""")
    skip_list.delete("""Key2""")

    assert skip_list.find("""V""") is None
    assert skip_list.find("""Key2""") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("""Key1""", 12)
    skip_list.insert("""V""", 13)
    skip_list.insert("""X""", 14)
    skip_list.insert("""Key2""", 15)

    skip_list.delete("""V""")
    assert skip_list.find("""V""") is None
    assert skip_list.find("""X""") == 14
    assert skip_list.find("""Key1""") == 12
    assert skip_list.find("""Key2""") == 15

    skip_list.delete("""X""")
    assert skip_list.find("""V""") is None
    assert skip_list.find("""X""") is None
    assert skip_list.find("""Key1""") == 12
    assert skip_list.find("""Key2""") == 15

    skip_list.delete("""Key1""")
    assert skip_list.find("""V""") is None
    assert skip_list.find("""X""") is None
    assert skip_list.find("""Key1""") is None
    assert skip_list.find("""Key2""") == 15

    skip_list.delete("""Key2""")
    assert skip_list.find("""V""") is None
    assert skip_list.find("""X""") is None
    assert skip_list.find("""Key1""") is None
    assert skip_list.find("""Key2""") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("""Key1""", 12)
    skip_list.insert("""V""", 13)
    skip_list.insert("""X""", 142)
    skip_list.insert("""Key2""", 15)

    skip_list.delete("""X""")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, """2""")
    skip_list.insert(4, """4""")
    skip_list.insert(6, """4""")
    skip_list.insert(4, """5""")
    skip_list.insert(8, """4""")
    skip_list.insert(9, """4""")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
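# Usage sketch (added): with the default p = 0.5, each extra level is reached by
# roughly half as many nodes as the one below it, giving O(log n) expected
# search/insert/delete.
#
#   sl = SkipList[int, str]()
#   sl.insert(3, "three")
#   sl.insert(1, "one")
#   sl.find(3)     # -> 'three'
#   list(sl)       # iteration yields keys in sorted order: [1, 3]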
| 672 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a : Tuple = False
class UpperCamelCase_ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""")
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="""numpy""", ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
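# Note (added): comparing a small 3x3 corner slice against hard-coded values with
# a loose max-abs tolerance (1e-2) is the usual pattern for pipeline regression
# tests: it keeps the check cheap while staying robust to minor numerical drift
# across devices and library versions.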
| 672 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 672 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f'''Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` '''
                """is not serializable with either `pickle` or `dill`. Instead you can surround """
                """the device with `str()` to get its string identifier that will be internally mapped """
                """to the actual `jaxlib.xla_extension.Device`.""")
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f'''Device with string identifier {self.device} not listed among the available '''
                f'''devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default '''
                f'''device: {str(jax.devices()[0])}.''')
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"""dtype""": jnp.int64}
            else:
                default_dtype = {"""dtype""": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"""dtype""": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, """__array__""") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
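# Usage sketch (added; assumes the public `datasets` API, which dispatches the
# "jax" format string to a formatter like the one above):
#
#   import jax
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#   ds[0]["x"]  # -> jax.Array on the default device
#   ds = ds.with_format("jax", device=str(jax.devices()[0]))  # device passed as a str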
| 672 | 1 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_7_0)
new_layer_name_dict = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
REMOTE_MODEL_PATHS = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("""~"""), """.cache""")
CACHE_DIR = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["""file_name"""])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase=False , _lowercase="text" ) -> Optional[Any]:
if model_type == "text":
UpperCAmelCase : int = BarkSemanticModel
UpperCAmelCase : Optional[Any] = BarkSemanticConfig
UpperCAmelCase : Union[str, Any] = BarkSemanticGenerationConfig
elif model_type == "coarse":
UpperCAmelCase : List[str] = BarkCoarseModel
UpperCAmelCase : int = BarkCoarseConfig
UpperCAmelCase : Union[str, Any] = BarkCoarseGenerationConfig
elif model_type == "fine":
UpperCAmelCase : str = BarkFineModel
UpperCAmelCase : List[str] = BarkFineConfig
UpperCAmelCase : str = BarkFineGenerationConfig
else:
raise NotImplementedError()
UpperCAmelCase : Union[str, Any] = F'''{model_type}_small''' if use_small else model_type
UpperCAmelCase : int = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(_lowercase ):
logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
UpperCAmelCase : Dict = torch.load(_lowercase , map_location=_lowercase )
# this is a hack
UpperCAmelCase : int = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
UpperCAmelCase : Optional[int] = model_args["""vocab_size"""]
UpperCAmelCase : Optional[Any] = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
UpperCAmelCase : str = model_args.pop("""n_head""" )
UpperCAmelCase : Optional[Any] = model_args.pop("""n_embd""" )
UpperCAmelCase : Optional[int] = model_args.pop("""n_layer""" )
UpperCAmelCase : str = ConfigClass(**checkpoint["""model_args"""] )
UpperCAmelCase : List[str] = ModelClass(config=_lowercase )
UpperCAmelCase : Optional[Any] = GenerationConfigClass()
UpperCAmelCase : Tuple = model_generation_config
UpperCAmelCase : Any = checkpoint["""model"""]
# fixup checkpoint
UpperCAmelCase : List[str] = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(_lowercase ):
# replace part of the key with corresponding layer name in HF implementation
UpperCAmelCase : List[Any] = k[len(_lowercase ) :]
for old_layer_name in new_layer_name_dict:
UpperCAmelCase : Union[str, Any] = new_k.replace(_lowercase , new_layer_name_dict[old_layer_name] )
UpperCAmelCase : Optional[Any] = state_dict.pop(_lowercase )
UpperCAmelCase : List[str] = set(state_dict.keys() ) - set(model.state_dict().keys() )
UpperCAmelCase : Any = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
UpperCAmelCase : Dict = set(model.state_dict().keys() ) - set(state_dict.keys() )
UpperCAmelCase : Optional[int] = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(_lowercase ) != 0:
raise ValueError(F'''extra keys found: {extra_keys}''' )
if len(_lowercase ) != 0:
raise ValueError(F'''missing keys: {missing_keys}''' )
model.load_state_dict(_lowercase , strict=_lowercase )
UpperCAmelCase : str = model.num_parameters(exclude_embeddings=_lowercase )
UpperCAmelCase : Union[str, Any] = checkpoint["""best_val_loss"""].item()
logger.info(F'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(_lowercase , 3 )} loss''' )
model.eval()
model.to(_lowercase )
del checkpoint, state_dict
return model
def __lowerCamelCase ( _lowercase , _lowercase=False , _lowercase="text" ) -> Optional[int]:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
UpperCAmelCase : List[str] = """cpu""" # do conversion on cpu
UpperCAmelCase : int = _get_ckpt_path(_lowercase , use_small=_lowercase )
UpperCAmelCase : List[str] = _load_model(_lowercase , _lowercase , model_type=_lowercase , use_small=_lowercase )
# load bark initial model
UpperCAmelCase : Union[str, Any] = _bark_load_model(_lowercase , """cpu""" , model_type=_lowercase , use_small=_lowercase )
if model_type == "text":
UpperCAmelCase : Tuple = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=_lowercase ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
UpperCAmelCase : Dict = 5
UpperCAmelCase : Tuple = 1_0
if model_type in ["text", "coarse"]:
UpperCAmelCase : Dict = torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
UpperCAmelCase : List[Any] = bark_model(_lowercase )[0]
UpperCAmelCase : List[Any] = model(_lowercase )
# take last logits
UpperCAmelCase : List[Any] = output_new_model_total.logits[:, [-1], :]
else:
UpperCAmelCase : Optional[int] = 3
UpperCAmelCase : Optional[Any] = 8
UpperCAmelCase : List[str] = torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
UpperCAmelCase : str = model(_lowercase , _lowercase )
UpperCAmelCase : int = bark_model(_lowercase , _lowercase )
UpperCAmelCase : str = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> str:
UpperCAmelCase : Optional[Any] = os.path.join(_lowercase , _lowercase )
UpperCAmelCase : List[Any] = BarkSemanticConfig.from_pretrained(os.path.join(_lowercase , """config.json""" ) )
UpperCAmelCase : Tuple = BarkCoarseConfig.from_pretrained(os.path.join(_lowercase , """config.json""" ) )
UpperCAmelCase : Optional[Any] = BarkFineConfig.from_pretrained(os.path.join(_lowercase , """config.json""" ) )
UpperCAmelCase : List[str] = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
UpperCAmelCase : Tuple = BarkSemanticModel.from_pretrained(_lowercase )
UpperCAmelCase : Dict = BarkCoarseModel.from_pretrained(_lowercase )
UpperCAmelCase : Union[str, Any] = BarkFineModel.from_pretrained(_lowercase )
UpperCAmelCase : Optional[int] = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
UpperCAmelCase : Any = BarkConfig.from_sub_model_configs(
_lowercase , _lowercase , _lowercase , _lowercase )
UpperCAmelCase : Union[str, Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
UpperCAmelCase : Optional[int] = BarkModel(_lowercase )
UpperCAmelCase : Dict = semantic
UpperCAmelCase : Union[str, Any] = coarseAcoustic
UpperCAmelCase : Optional[Any] = fineAcoustic
UpperCAmelCase : Dict = codec
UpperCAmelCase : Dict = bark_generation_config
Path(_lowercase ).mkdir(exist_ok=_lowercase )
bark.save_pretrained(_lowercase , repo_id=_lowercase , push_to_hub=_lowercase )
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
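# Example invocation (added; the script name and output path are placeholders):
#
#   python convert_suno_to_hf.py text ./bark-text-small --is_small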
| 672 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def main():
    g = Github(os.environ["""GITHUB_TOKEN"""])
    repo = g.get_repo("""huggingface/transformers""")
    open_issues = repo.get_issues(state="""open""")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="""closed""")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                """This issue has been automatically marked as stale because it has not had """
                """recent activity. If you think this still needs to be addressed """
                """please comment on this thread.\n\nPlease note that issues that do not follow the """
                """[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
                """are likely to be ignored.""")
if __name__ == "__main__":
main()
| 672 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(F'''Building PyTorch model from configuration: {config}''')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
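# Example invocation (added; file paths are placeholders):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin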
| 672 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Any:
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : str = use_input_mask
UpperCAmelCase : Optional[int] = use_token_type_ids
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : str = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : str = initializer_range
UpperCAmelCase : List[Any] = num_labels
UpperCAmelCase : Dict = num_choices
UpperCAmelCase : Tuple = scope
def _lowercase( self ) -> Dict:
UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase( self ) -> Dict:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def _lowercase( self , A , A , A , A , A , A , A ) -> str:
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A )
UpperCAmelCase : Optional[int] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> List[Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
UpperCAmelCase : str = model(
A , attention_mask=A , encoder_hidden_states=A , )
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Dict = True
UpperCAmelCase : Tuple = True
UpperCAmelCase : str = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
UpperCAmelCase : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )["""hidden_states"""][0]
UpperCAmelCase : Optional[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def _lowercase( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowercase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = OpenLlamaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : int = type
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[str] = 3
UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""]
UpperCAmelCase : str = input_ids.ne(1 ).to(A )
UpperCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = 3
UpperCAmelCase : Any = """single_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(A )
UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> int:
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = 3
UpperCAmelCase : Optional[Any] = """multi_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : int = input_ids.ne(1 ).to(A )
UpperCAmelCase : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase : Any = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowercase( self ) -> Dict:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : List[Any] = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Union[str, Any] = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : List[str] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[int] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
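# Context (added): the `scaling_type` cases exercised above correspond to a
# `rope_scaling` config of the form {"type": "linear" | "dynamic", "factor": float}.
# "linear" rescales positions at every sequence length, while "dynamic" scaling only
# changes the RoPE embeddings once the input exceeds the trained context window,
# which is why the short-input outputs still match in the dynamic branch.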
| 672 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
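# Usage sketch (added): 0-1 BFS runs in O(V + E) because zero-weight edges are
# pushed to the front of the deque and one-weight edges to the back, so vertices
# are popped in nondecreasing distance order.
#
#   g = AdjacencyList(3)
#   g.add_edge(0, 1, 0)
#   g.add_edge(1, 2, 1)
#   g.add_edge(0, 2, 1)
#   assert g.get_shortest_path(0, 2) == 1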
| 672 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
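# Quick check (added): next_prime scans upward from factor * value (or downward
# with desc=True); if the starting value is itself prime, the search restarts at
# value + 1 so a strictly larger prime is returned.
#
#   >>> next_prime(14)
#   17
#   >>> next_prime(13)   # 13 is prime, so the next prime up is returned
#   17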
| 672 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
a : Optional[int] = """cuda""" if torch.cuda.is_available() else """cpu"""
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["""title"""], documents["""text"""]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else """""")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["""title"""], documents["""text"""], truncation=True, padding="""longest""", return_tensors="""pt""")["""input_ids"""]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("""Step 1 - Create the dataset""")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        """csv""", data_files=[rag_example_args.csv_path], split="""train""", delimiter="""\t""", column_names=["""title""", """text"""])
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"""text""": Value("""string"""), """title""": Value("""string"""), """embeddings""": Sequence(Value("""float32"""))})  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), batched=True, batch_size=processing_args.batch_size, features=new_features, )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, """my_knowledge_dataset""")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("""Step 2 - Index the dataset""")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("""embeddings""", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, """my_knowledge_dataset_hnsw_index.faiss""")
    dataset.get_index("""embeddings""").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class UpperCamelCase_ :
lowercase = field(
default=str(Path(__magic_name__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
lowercase = field(
default=__magic_name__ , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
lowercase = field(
default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
lowercase = field(
default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
} , )
lowercase = field(
default=str(Path(__magic_name__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class UpperCamelCase_ :
lowercase = field(
default=__magic_name__ , metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} , )
lowercase = field(
default=16 , metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} , )
@dataclass
class UpperCamelCase_ :
lowercase = field(
default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
lowercase = field(
default=128 , metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
a : str = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
a , a , a : Dict = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
a : Optional[int] = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
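# Hedged usage sketch: this script is typically launched from the command line
# with a tab-separated file of "title<TAB>text" rows. The script filename and
# paths below are illustrative, not taken from this file:
#
#   python use_own_knowledge_dataset.py \
#       --csv_path ./my_knowledge_dataset.csv \
#       --output_dir ./my_knowledge_dataset_index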
| 672 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Union[str, Any] = set()
# Replace all the whitespace in our sentence
UpperCAmelCase : List[str] = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(_lowercase ) == 2_6
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
UpperCAmelCase : Tuple = [False] * 2_6
for char in input_str:
if char.islower():
UpperCAmelCase : Any = True
elif char.isupper():
UpperCAmelCase : Union[str, Any] = True
return all(_lowercase )
def __lowerCamelCase ( _lowercase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def __lowerCamelCase ( ) -> None:
from timeit import timeit
UpperCAmelCase : str = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
print(timeit("""is_pangram()""" , setup=_lowercase ) )
print(timeit("""is_pangram_faster()""" , setup=_lowercase ) )
print(timeit("""is_pangram_fastest()""" , setup=_lowercase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 672 | 1 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCamelCase ( _lowercase , _lowercase ) -> List[Any]:
assert isinstance(_lowercase , _lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
UpperCAmelCase : Any = tmp_path / """cache"""
UpperCAmelCase : Union[str, Any] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : int = TextDatasetReader(_lowercase , cache_dir=_lowercase , keep_in_memory=_lowercase ).read()
_check_text_dataset(_lowercase , _lowercase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Any:
UpperCAmelCase : List[str] = tmp_path / """cache"""
UpperCAmelCase : Dict = {"""text""": """string"""}
UpperCAmelCase : Any = features.copy() if features else default_expected_features
UpperCAmelCase : List[str] = (
Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Union[str, Any] = TextDatasetReader(_lowercase , features=_lowercase , cache_dir=_lowercase ).read()
_check_text_dataset(_lowercase , _lowercase )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
UpperCAmelCase : List[Any] = tmp_path / """cache"""
UpperCAmelCase : Dict = {"""text""": """string"""}
UpperCAmelCase : List[Any] = TextDatasetReader(_lowercase , cache_dir=_lowercase , split=_lowercase ).read()
_check_text_dataset(_lowercase , _lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
if issubclass(_lowercase , _lowercase ):
UpperCAmelCase : int = text_path
elif issubclass(_lowercase , _lowercase ):
UpperCAmelCase : Optional[int] = [text_path]
UpperCAmelCase : int = tmp_path / """cache"""
UpperCAmelCase : Dict = {"""text""": """string"""}
UpperCAmelCase : List[Any] = TextDatasetReader(_lowercase , cache_dir=_lowercase ).read()
_check_text_dataset(_lowercase , _lowercase )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase=("train",) ) -> Any:
assert isinstance(_lowercase , _lowercase )
for split in splits:
UpperCAmelCase : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> int:
UpperCAmelCase : Tuple = tmp_path / """cache"""
UpperCAmelCase : Optional[Any] = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : List[str] = TextDatasetReader({"""train""": text_path} , cache_dir=_lowercase , keep_in_memory=_lowercase ).read()
_check_text_datasetdict(_lowercase , _lowercase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
UpperCAmelCase : int = tmp_path / """cache"""
    # the "text" column is read with string dtype by default
UpperCAmelCase : Optional[int] = {"""text""": """string"""}
UpperCAmelCase : List[Any] = features.copy() if features else default_expected_features
UpperCAmelCase : str = (
Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : str = TextDatasetReader({"""train""": text_path} , features=_lowercase , cache_dir=_lowercase ).read()
_check_text_datasetdict(_lowercase , _lowercase )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
if split:
UpperCAmelCase : Any = {split: text_path}
else:
UpperCAmelCase : Optional[int] = """train"""
UpperCAmelCase : Optional[int] = {"""train""": text_path, """test""": text_path}
UpperCAmelCase : str = tmp_path / """cache"""
UpperCAmelCase : str = {"""text""": """string"""}
UpperCAmelCase : Optional[Any] = TextDatasetReader(_lowercase , cache_dir=_lowercase ).read()
_check_text_datasetdict(_lowercase , _lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
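# Minimal sketch of the reader these tests exercise, assuming a plain-text
# file where each line becomes one row of a single "text" column (the path
# below is illustrative):
#
#   ds = TextDatasetReader("my_file.txt", cache_dir="cache").read()
#   assert ds.column_names == ["text"]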
| 672 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
a : Union[str, Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
a : int = """\
Mean Squared Error (MSE) is the average of the squared differences between the predicted
and actual values.
"""
a : int = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows:
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
def _lowercase( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def _lowercase( self ) -> List[Any]:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def _lowercase( self , A , A , A=None , A="uniform_average" , A=True ) -> List[Any]:
UpperCAmelCase : List[Any] = mean_squared_error(
A , A , sample_weight=A , multioutput=A , squared=A )
return {"mse": mse}
| 672 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : int = logging.get_logger(__name__)
a : Optional[int] = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'gpt_neox_japanese'
def __init__( self , A=32000 , A=2560 , A=32 , A=32 , A=4 , A="gelu" , A=1.0_0 , A=10000 , A=2048 , A=0.0_2 , A=1e-5 , A=True , A=31996 , A=31999 , A=0.1 , A=0.0 , **A , ) -> List[Any]:
super().__init__(bos_token_id=A , eos_token_id=A , **A )
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : int = max_position_embeddings
UpperCAmelCase : Dict = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Any = intermediate_multiple_size
UpperCAmelCase : List[Any] = hidden_act
UpperCAmelCase : List[str] = rotary_pct
UpperCAmelCase : int = rotary_emb_base
UpperCAmelCase : Optional[int] = initializer_range
UpperCAmelCase : List[Any] = layer_norm_eps
UpperCAmelCase : Any = use_cache
UpperCAmelCase : Tuple = attention_dropout
UpperCAmelCase : Union[str, Any] = hidden_dropout
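# Hedged usage sketch: in the upstream library this class is
# GPTNeoXJapaneseConfig; instantiating it with no arguments yields the
# defaults above, and any field can be overridden by keyword:
#
#   config = GPTNeoXJapaneseConfig(hidden_dropout=0.1)
#   print(config.vocab_size, config.hidden_size)  # 32000 2560 per the defaults above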
| 672 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Any = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'fnet'
def __init__( self , A=32000 , A=768 , A=12 , A=3072 , A="gelu_new" , A=0.1 , A=512 , A=4 , A=0.0_2 , A=1e-12 , A=False , A=512 , A=3 , A=1 , A=2 , **A , ) -> int:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Dict = max_position_embeddings
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : List[Any] = type_vocab_size
UpperCAmelCase : int = layer_norm_eps
UpperCAmelCase : Optional[Any] = use_tpu_fourier_optimizations
UpperCAmelCase : List[Any] = tpu_short_seq_length
| 672 | 1 |
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=64 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , A=2 , A=2 , A=2 , A=2 , A=4 , A=1 , ) -> int:
UpperCAmelCase : str = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : List[Any] = seq_length
UpperCAmelCase : int = is_training
UpperCAmelCase : Dict = use_input_mask
UpperCAmelCase : Tuple = use_token_type_ids
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : Union[str, Any] = vocab_size
UpperCAmelCase : Dict = hidden_size
UpperCAmelCase : Any = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : int = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : Optional[int] = hidden_dropout_prob
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : Optional[int] = max_position_embeddings
UpperCAmelCase : Optional[int] = type_vocab_size
UpperCAmelCase : Union[str, Any] = type_sequence_label_size
UpperCAmelCase : List[Any] = initializer_range
UpperCAmelCase : str = num_labels
UpperCAmelCase : Union[str, Any] = num_choices
UpperCAmelCase : Dict = scope
UpperCAmelCase : Optional[Any] = q_groups
UpperCAmelCase : Dict = k_groups
UpperCAmelCase : Any = v_groups
UpperCAmelCase : Union[str, Any] = post_attention_groups
UpperCAmelCase : List[str] = intermediate_groups
UpperCAmelCase : Dict = output_groups
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : str = None
UpperCAmelCase : List[Any] = None
UpperCAmelCase : Optional[int] = None
if self.use_labels:
UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase( self ) -> int:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _lowercase( self , A , A , A , A , A , A ) -> Tuple:
UpperCAmelCase : Any = SqueezeBertModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[str] = model(A , A )
UpperCAmelCase : Any = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A ) -> Dict:
UpperCAmelCase : List[Any] = SqueezeBertForMaskedLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = SqueezeBertForQuestionAnswering(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Optional[int] = model(
A , attention_mask=A , start_positions=A , end_positions=A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase( self , A , A , A , A , A , A ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = self.num_labels
UpperCAmelCase : int = SqueezeBertForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Optional[int] = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A , A , A , A ) -> Union[str, Any]:
UpperCAmelCase : str = self.num_labels
UpperCAmelCase : Dict = SqueezeBertForTokenClassification(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Optional[Any] = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase( self , A , A , A , A , A , A ) -> List[str]:
UpperCAmelCase : Union[str, Any] = self.num_choices
UpperCAmelCase : Any = SqueezeBertForMultipleChoice(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Any = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Optional[int] = config_and_inputs
UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowercase = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = True
lowercase = False
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Tuple = SqueezeBertModelTester(self )
UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , dim=37 )
def _lowercase( self ) -> Any:
self.config_tester.run_common_tests()
def _lowercase( self ) -> int:
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*A )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*A )
def _lowercase( self ) -> Any:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*A )
@slow
def _lowercase( self ) -> Optional[int]:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : List[Any] = SqueezeBertModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Optional[int] = SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" )
UpperCAmelCase : Any = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
UpperCAmelCase : Dict = model(A )[0]
UpperCAmelCase : Dict = torch.Size((1, 3) )
self.assertEqual(output.shape , A )
UpperCAmelCase : Union[str, Any] = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
self.assertTrue(torch.allclose(A , A , atol=1e-4 ) )
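# Hedged sketch mirroring the integration test above: the MNLI checkpoint maps
# a tokenized premise/hypothesis pair to 3 logits (example sentences are illustrative):
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("squeezebert/squeezebert-mnli")
#   model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
#   inputs = tok("A man is eating.", "A person eats food.", return_tensors="pt")
#   logits = model(**inputs).logits  # shape (1, 3)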
| 672 |
'''simple docstring'''
a : List[Any] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def __lowerCamelCase ( _lowercase ) -> int:
UpperCAmelCase : Dict = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
UpperCAmelCase : Stack[int] = Stack()
UpperCAmelCase : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_lowercase ) )
elif i in operators:
# RULE 2
operator_stack.push(_lowercase )
elif i == ")":
# RULE 4
UpperCAmelCase : List[Any] = operator_stack.peek()
operator_stack.pop()
UpperCAmelCase : str = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase : str = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase : List[Any] = operators[opr](_lowercase , _lowercase )
operand_stack.push(_lowercase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
a : Tuple = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
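# Illustrative trace of the two-stack evaluation above on the sample input
# "(5 + ((4 * 2) * (2 + 3)))": digits go to the operand stack (RULE 1),
# operators to the operator stack (RULE 2), and every ')' pops one operator
# plus two operands and pushes the result back (RULE 4):
#
#   (4 * 2) -> 8;  (2 + 3) -> 5;  8 * 5 -> 40;  5 + 40 -> 45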
| 672 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : List[Any] = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : List[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a : List[str] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
UpperCAmelCase : List[str] = state_dict.pop(_lowercase )
UpperCAmelCase : List[str] = val
def __lowerCamelCase ( _lowercase ) -> Any:
UpperCAmelCase : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCAmelCase : List[str] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
UpperCAmelCase : Dict = value
else:
UpperCAmelCase : List[Any] = value
return new_state_dict
def __lowerCamelCase ( _lowercase , _lowercase=False ) -> Optional[int]:
UpperCAmelCase : Dict = """"""
if is_panoptic:
UpperCAmelCase : Tuple = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase : List[Any] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCAmelCase : List[Any] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : Dict = in_proj_weight[:2_5_6, :]
UpperCAmelCase : Optional[Any] = in_proj_bias[:2_5_6]
UpperCAmelCase : List[Any] = in_proj_weight[2_5_6:5_1_2, :]
UpperCAmelCase : Tuple = in_proj_bias[2_5_6:5_1_2]
UpperCAmelCase : List[str] = in_proj_weight[-2_5_6:, :]
UpperCAmelCase : List[str] = in_proj_bias[-2_5_6:]
def __lowerCamelCase ( ) -> Dict:
UpperCAmelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase : Tuple = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
UpperCAmelCase : str = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
UpperCAmelCase : List[Any] = """resnet101"""
if "dc5" in model_name:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : List[Any] = """panoptic""" in model_name
if is_panoptic:
UpperCAmelCase : Union[str, Any] = 2_5_0
else:
UpperCAmelCase : int = 9_1
UpperCAmelCase : Tuple = """huggingface/label-files"""
UpperCAmelCase : List[Any] = """coco-detection-id2label.json"""
UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase : Dict = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase : Optional[Any] = idalabel
UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
# load image processor
UpperCAmelCase : List[str] = """coco_panoptic""" if is_panoptic else """coco_detection"""
UpperCAmelCase : List[Any] = ConditionalDetrImageProcessor(format=_lowercase )
# prepare image
UpperCAmelCase : Union[str, Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=_lowercase , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = encoding["""pixel_values"""]
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
UpperCAmelCase : int = torch.hub.load("""DeppMeng/ConditionalDETR""" , _lowercase , pretrained=_lowercase ).eval()
UpperCAmelCase : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
UpperCAmelCase : List[Any] = """conditional_detr.""" + src
rename_key(_lowercase , _lowercase , _lowercase )
UpperCAmelCase : List[Any] = rename_backbone_keys(_lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowercase , is_panoptic=_lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase : int = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
UpperCAmelCase : Union[str, Any] = state_dict.pop(_lowercase )
UpperCAmelCase : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCAmelCase : Any = state_dict.pop(_lowercase )
UpperCAmelCase : Optional[Any] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
UpperCAmelCase : List[Any] = state_dict.pop(_lowercase )
UpperCAmelCase : str = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
UpperCAmelCase : Optional[int] = state_dict.pop(_lowercase )
UpperCAmelCase : Union[str, Any] = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase : List[Any] = ConditionalDetrForSegmentation(_lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
model.push_to_hub(repo_id=_lowercase , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
UpperCAmelCase : Union[str, Any] = conditional_detr(_lowercase )
UpperCAmelCase : int = model(_lowercase )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
a : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
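# Hedged usage sketch (the script filename below is illustrative; the model
# name must match one of the torch-hub entries handled above, with optional
# "resnet101" / "dc5" / "panoptic" markers encoded in the name):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50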
| 672 | 1 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 4_0_0 * 2**2_0, 6_0_0 * 2**2_0] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 1_0_0 * 2**2_0, 9_0_0 * 2**2_0] )
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , _lowercase )
UpperCAmelCase : Tuple = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
UpperCAmelCase : Any = dataset_size < in_memory_max_size
else:
UpperCAmelCase : Any = False
UpperCAmelCase : Optional[Any] = is_small_dataset(_lowercase )
assert result == expected
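# Minimal sketch of the helper under test: a dataset counts as "small" only
# when its byte size is known and strictly below the configured in-memory cap
# (a cap of 0 disables keeping datasets in memory). Values are illustrative:
#
#   datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20  # 500 MiB
#   is_small_dataset(400 * 2**20)  # -> True
#   is_small_dataset(600 * 2**20)  # -> False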
| 672 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 672 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> np.ndarray:
UpperCAmelCase : List[str] = int(np.ceil((x_end - xa) / step_size ) )
UpperCAmelCase : Optional[Any] = np.zeros((n + 1,) )
UpperCAmelCase : Any = ya
UpperCAmelCase : Any = xa
for k in range(_lowercase ):
UpperCAmelCase : List[Any] = y[k] + step_size * ode_func(_lowercase , y[k] )
x += step_size
return y
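# Readable sketch of the explicit Euler step above, assuming ode_func(x, y)
# returns dy/dx; names are illustrative. Solving y' = y with y(0) = 1,
# the last value should approach e:
def euler_sketch(ode_func, ya, xa, x_end, step_size):
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros(n + 1)
    y[0] = ya
    x = xa
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])  # forward step
        x += step_size
    return y

# euler_sketch(lambda x, y: y, 1.0, 0.0, 1.0, 0.001)[-1]  # ~2.717, vs e ~2.718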
if __name__ == "__main__":
import doctest
doctest.testmod()
| 672 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
a : int = None
a : List[Any] = logging.get_logger(__name__)
a : Dict = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
a : Union[str, Any] = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
a : List[Any] = {
"""moussaKam/mbarthez""": 1_0_2_4,
"""moussaKam/barthez""": 1_0_2_4,
"""moussaKam/barthez-orangesum-title""": 1_0_2_4,
}
a : int = """▁"""
class UpperCamelCase_ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
lowercase = BarthezTokenizer
def __init__( self , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , **A , ) -> List[Any]:
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , **A , )
UpperCAmelCase : Union[str, Any] = vocab_file
UpperCAmelCase : int = False if not self.vocab_file else True
def _lowercase( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Optional[int] = [self.cls_token_id]
UpperCAmelCase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : Optional[int] = [self.sep_token_id]
UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase( self , A , A = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : str = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
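# Hedged usage sketch: in the upstream library this class is
# BarthezTokenizerFast, and the checkpoint name comes from the pretrained map
# above:
#
#   tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#   ids = tok("Bonjour le monde")["input_ids"]  # <s> ... </s> layout per the methods above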
| 672 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
a : List[Any] = None
a : List[Any] = logging.get_logger(__name__)
a : Tuple = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
a : Any = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
a : int = {
"""google/bigbird-roberta-base""": 4_0_9_6,
"""google/bigbird-roberta-large""": 4_0_9_6,
"""google/bigbird-base-trivia-itc""": 4_0_9_6,
}
a : Tuple = """▁"""
class UpperCamelCase_ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = BigBirdTokenizer
lowercase = ['input_ids', 'attention_mask']
lowercase = []
def __init__( self , A=None , A=None , A="<unk>" , A="<s>" , A="</s>" , A="<pad>" , A="[SEP]" , A="[MASK]" , A="[CLS]" , **A , ) -> Dict:
UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else bos_token
UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else eos_token
UpperCAmelCase : int = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
UpperCAmelCase : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else pad_token
UpperCAmelCase : Any = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else cls_token
UpperCAmelCase : Optional[int] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase : List[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , **A , )
UpperCAmelCase : List[str] = vocab_file
UpperCAmelCase : Optional[Any] = False if not self.vocab_file else True
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : Optional[int] = [self.sep_token_id]
UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowercase( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : Dict = [self.sep_token_id]
UpperCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase( self , A , A = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 672 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> np.array:
UpperCAmelCase : Optional[Any] = int(np.ceil((x_end - xa) / step_size ) )
UpperCAmelCase : str = np.zeros((n + 1,) )
UpperCAmelCase : Optional[Any] = ya
UpperCAmelCase : Union[str, Any] = xa
for k in range(_lowercase ):
UpperCAmelCase : Dict = y[k] + step_size * ode_func(_lowercase , y[k] )
UpperCAmelCase : Optional[int] = y[k] + (
(step_size / 2) * (ode_func(_lowercase , y[k] ) + ode_func(x + step_size , _lowercase ))
)
x += step_size
return y
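# Readable sketch of the predictor-corrector (Heun) step above, assuming
# ode_func(x, y) returns dy/dx; names are illustrative. Compare with the
# plain Euler sketch earlier in this collection:
def heun_sketch(ode_func, ya, xa, x_end, h):
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros(n + 1)
    y[0] = ya
    x = xa
    for k in range(n):
        predictor = y[k] + h * ode_func(x, y[k])  # Euler predictor
        y[k + 1] = y[k] + (h / 2) * (ode_func(x, y[k]) + ode_func(x + h, predictor))
        x += h
    return y

# heun_sketch(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)[-1]  # ~2.7182, closer to e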
if __name__ == "__main__":
import doctest
doctest.testmod()
| 672 | 1 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Tuple = {"""vocab_file""": """spiece.model"""}
a : Optional[Any] = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
a : Dict = {
"""google/bigbird-roberta-base""": 4_0_9_6,
"""google/bigbird-roberta-large""": 4_0_9_6,
"""google/bigbird-base-trivia-itc""": 4_0_9_6,
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
lowercase = []
def __init__( self , A , A="<unk>" , A="<s>" , A="</s>" , A="<pad>" , A="[SEP]" , A="[MASK]" , A="[CLS]" , A = None , **A , ) -> None:
UpperCAmelCase : int = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else bos_token
UpperCAmelCase : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else eos_token
UpperCAmelCase : Optional[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
UpperCAmelCase : Union[str, Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else pad_token
UpperCAmelCase : Any = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else cls_token
UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase : Union[str, Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sep_token=A , mask_token=A , cls_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
UpperCAmelCase : Union[str, Any] = vocab_file
UpperCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def _lowercase( self ) -> Optional[int]:
return self.sp_model.get_piece_size()
def _lowercase( self ) -> List[str]:
UpperCAmelCase : int = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
UpperCAmelCase : int = self.__dict__.copy()
UpperCAmelCase : Union[str, Any] = None
return state
def __setstate__( self , A ) -> int:
UpperCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase : int = {}
UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def _lowercase( self , A ) -> Union[str, Any]:
return self.sp_model.piece_to_id(A )
def _lowercase( self , A ) -> int:
UpperCAmelCase : Dict = self.sp_model.IdToPiece(A )
return token
    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def _decode( self , token_ids , skip_special_tokens = False , clean_up_tokenization_spaces = None , spaces_between_special_tokens = True , **kwargs , ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("""use_source_tokenizer""" , False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build the string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r""" (\[(MASK|SEP)\])""" , r"""\1""" , """ """.join(sub_texts ) )
        else:
            text = """""".join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
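    # Worked example: for token_ids_0 = [10, 11, 12] and no second segment, the mask is
    # [1, 0, 0, 0, 1] -- a 1 for the added [CLS] and [SEP] positions, 0 elsewhere.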
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
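# --- Added usage sketch (not part of the original file) ---
# A minimal round-trip, assuming a local SentencePiece model at ``./spiece.model``
# (the path is hypothetical):
#
#     tokenizer = BigBirdTokenizer("./spiece.model")
#     ids = tokenizer("Paris is the capital of France.")["input_ids"]
#     print(tokenizer.decode(ids))
#
# ``__call__`` and ``decode`` come from the PreTrainedTokenizer base class and route
# through ``_tokenize`` / ``convert_tokens_to_string`` defined above.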
| 672 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)

class ZeroShotClassificationArgumentHandler( ArgumentHandler ):
    def _parse_labels( self , labels ) -> List[str]:
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split(""",""" ) if label.strip()]
        return labels
    def __call__( self , sequences , labels , hypothesis_template ):
        if len(labels ) == 0 or len(sequences ) == 0:
            raise ValueError("""You must include at least one label and at least one sequence.""" )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    """The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
                    """Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
                ).format(hypothesis_template ) )
        if isinstance(sequences , str ):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
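    # Example of the expansion above (illustrative values): sequences="Who won the match?",
    # labels=["sports", "politics"], hypothesis_template="This example is {}." yields
    # sequence_pairs = [["Who won the match?", "This example is sports."],
    #                   ["Who won the match?", "This example is politics."]]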
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotClassificationPipeline( ChunkPipeline ):
    def __init__( self , args_parser=ZeroShotClassificationArgumentHandler() , *args , **kwargs ) -> None:
        self._args_parser = args_parser
        super().__init__(*args , **kwargs )
        if self.entailment_id == -1:
            logger.warning(
                """Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
                """-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
    @property
    def entailment_id( self ) -> int:
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("""entail""" ):
                return ind
        return -1
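    # When no label name starts with "entail", the -1 fallback means downstream code
    # indexes the last logit column, which is not guaranteed to be the entailment class.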
    def _parse_and_tokenize( self , sequence_pairs , padding=True , add_special_tokens=True , truncation=TruncationStrategy.ONLY_FIRST , **kwargs ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                """Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
                """ `pad_token=eos_token`""" )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=truncation , )
        except Exception as e:
            if "too short" in str(e ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs , add_special_tokens=add_special_tokens , return_tensors=return_tensors , padding=padding , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs
    def _sanitize_parameters( self , **kwargs ):
        if kwargs.get("""multi_class""" , None ) is not None:
            kwargs["""multi_label"""] = kwargs["""multi_class"""]
            logger.warning(
                """The `multi_class` argument has been deprecated and renamed to `multi_label`. """
                """`multi_class` will be removed in a future version of Transformers.""" )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["""candidate_labels"""] = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
        if "hypothesis_template" in kwargs:
            preprocess_params["""hypothesis_template"""] = kwargs["""hypothesis_template"""]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["""multi_label"""] = kwargs["""multi_label"""]
        return preprocess_params, {}, postprocess_params
    def __call__( self , sequences , *args , **kwargs , ):
        if len(args ) == 0:
            pass
        elif len(args ) == 1 and "candidate_labels" not in kwargs:
            kwargs["""candidate_labels"""] = args[0]
        else:
            raise ValueError(f'''Unable to understand extra arguments {args}''' )
        return super().__call__(sequences , **kwargs )
def _lowercase( self , A , A=None , A="This example is {}." ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : List[Any] = self._args_parser(A , A , A )
for i, (candidate_label, sequence_pair) in enumerate(zip(A , A ) ):
UpperCAmelCase : Any = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(A ) - 1,
**model_input,
}
    def _forward( self , inputs ):
        candidate_label = inputs["""candidate_label"""]
        sequence = inputs["""sequence"""]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs )
        model_outputs = {
            """candidate_label""": candidate_label,
            """sequence""": sequence,
            """is_last""": inputs["""is_last"""],
            **outputs,
        }
        return model_outputs
    def postprocess( self , model_outputs , multi_label=False ):
        candidate_labels = [outputs["""candidate_label"""] for outputs in model_outputs]
        sequences = [outputs["""sequence"""] for outputs in model_outputs]
        logits = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
        N = logits.shape[0]
        n = len(candidate_labels )
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(candidate_labels ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits ) / np.exp(entail_contr_logits ).sum(-1 , keepdims=True )
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits ) / np.exp(entail_logits ).sum(-1 , keepdims=True )
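        # Worked example (illustrative numbers): with entailment logits [1.0, 2.0] for two
        # candidate labels, the softmax above gives scores of roughly [0.27, 0.73], so the
        # second label is ranked first below.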
        top_inds = list(reversed(scores[0].argsort() ) )
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
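# --- Added usage sketch (not part of the original file) ---
# The pipeline above is normally driven through the ``pipeline`` factory; the checkpoint
# name below is illustrative and assumes it is available locally or via the Hub:
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     result = classifier(
#         "The team scored in the final minute.",
#         candidate_labels=["sports", "politics", "cooking"],
#     )
#     # result["labels"] is sorted by result["scores"], highest first.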
| 672 | 1 |