def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """
    Create all contiguous n-grams of length ``ngram_size`` from a sentence.

    >>> create_ngram("abcde", 3)
    ['abc', 'bcd', 'cde']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipeline_utils import DiffusionPipeline` works.
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        # Find the label id whose name starts with "entail" (e.g. "ENTAILMENT") in the MNLI head.
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        # Pair the text with one "This example is {label}" hypothesis per candidate label.
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
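
# A minimal usage sketch (not part of the original file; the example strings are
# assumptions): the tool is called like a function, and `labels` supplies the
# candidate classes for the zero-shot MNLI-style classification.
#
#   classifier = TextClassificationTool()
#   classifier("This is a super nice API!", labels=["positive", "negative"])  # -> "positive"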
import gc
import unittest

import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward with a string prompt
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward with pre-computed prompt embeddings
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward with string prompts
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward with pre-computed prompt and negative-prompt embeddings
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(
            prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(torch_device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
import os


def solution() -> int:
    """
    Find the maximum total from top to bottom of the triangle in triangle.txt
    by accumulating, row by row, the best path sum that reaches each entry.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            # best path ending directly above, and above-left (0 when out of range)
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
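
# Worked example of the accumulation in solution() on a hypothetical 3-row triangle:
#
#   3            3
#   7 4     ->   10  7         (7+3, 4+3)
#   2 4 6        12 14 13      (2+10, 4+max(10, 7), 6+7)
#
# The answer for this input would be max(12, 14, 13) = 14.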
import unittest

from parameterized import parameterized

from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel


class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch

import numpy as np
import pytest
from absl.testing import parameterized

import datasets
from datasets import load_metric

from .utils import for_all_test_methods, local, slow


# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None


def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper


@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which are supposed to download and load a comet model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
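
# A minimal usage sketch (not part of the original file; the waveform variable is an
# assumption): the tool chains encode -> forward -> decode, so calling it with a raw
# audio array returns the transcription string.
#
#   transcriber = SpeechToTextTool()
#   text = transcriber(audio)  # `audio`: a 16 kHz mono waveform as a numpy array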
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer decimal number to a binary string, e.g.:

    >>> decimal_to_binary(0)
    '0b0'
    >>> decimal_to_binary(2)
    '0b10'
    >>> decimal_to_binary(-5)
    '-0b101'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize
_CITATION = """\
@inproceedings{banarjee2005,
  title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
  author = {Banerjee, Satanjeev and Lavie, Alon},
  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
  month = jun,
  year = {2005},
  address = {Ann Arbor, Michigan},
  publisher = {Association for Computational Linguistics},
  url = {https://www.aclweb.org/anthology/W05-0909},
  pages = {65--72},
}
"""

_DESCRIPTION = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.

METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""

_KWARGS_DESCRIPTION = """
Computes METEOR score of translated segments against one or more references.
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
    gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
    'meteor': meteor score.
Examples:

    >>> meteor = datasets.load_metric('meteor')
    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
    >>> results = meteor.compute(predictions=predictions, references=references)
    >>> print(round(results["meteor"], 4))
    0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        # newer NLTK versions require pre-tokenized inputs
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging


FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak a fairseq BART checkpoint into the transformers BART structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
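
# Example invocation (hypothetical script filename and output path):
#   python convert_bart_checkpoint.py bart.large.cnn ./bart-large-cnn-converted \
#       --hf_config facebook/bart-large-cnn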
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Sigmoid activation, which squashes each input into (0, 1).

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """
    SiLU ("swish") activation: x * sigmoid(x), applied element-wise.

    >>> sigmoid_linear_unit(np.array([0.0]))
    array([0.])
    """
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class UpperCamelCase :
def __init__( self : List[Any] , snake_case__ : Dict , snake_case__ : List[Any]=1_3 , snake_case__ : Optional[int]=7 , snake_case__ : Dict=True , snake_case__ : List[str]=True , snake_case__ : Union[str, Any]=True , snake_case__ : Optional[Any]=True , snake_case__ : Union[str, Any]=9_9 , snake_case__ : int=6_4 , snake_case__ : Optional[int]=3_2 , snake_case__ : Union[str, Any]=5 , snake_case__ : str=4 , snake_case__ : Dict=3_7 , snake_case__ : List[str]="gelu" , snake_case__ : int=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Dict=5_1_2 , snake_case__ : Optional[int]=1_6 , snake_case__ : List[Any]=2 , snake_case__ : int=0.02 , snake_case__ : Any=3 , snake_case__ : int=4 , snake_case__ : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = embedding_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
    def create_and_check_megatron_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels)
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                # `get_values` and `MODEL_FOR_PRETRAINING_MAPPING` are assumed to be imported at the top of this file
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    # assumes `torch_device` is imported from transformers.testing_utils earlier in this file
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip('Model is not available.')
    def test_inference_no_head(self):
        directory = 'nvidia/megatron-bert-uncased-345m'
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ['MYDIR'], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = 'ii={} jj={} a={} b={}'.format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
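
# Hedged usage note (not part of the upstream test): the integration test above is
# skipped by default because the nvidia/megatron-bert-uncased-345m checkpoint has to
# be downloaded and converted to the Transformers format first; once converted,
# point the MYDIR environment variable at the folder holding it, e.g.
#
#     MYDIR=/path/to/checkpoints python -m pytest -k test_inference_no_head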
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Dict = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"
    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
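
# Hedged usage sketch (assumes the public `transformers` package exposes this class
# as `VanConfig`; the relative imports above keep this module from running standalone):
#
#     from transformers import VanConfig
#
#     config = VanConfig(hidden_sizes=[32, 64, 160, 256], depths=[2, 2, 6, 2])
#     print(config.mlp_ratios)  # [8, 8, 4, 4] (the default is kept)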
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str) -> None:
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = 'first_stage_model.'
    for key in keys:
        if key.startswith(first_stage_key):
            # strip the prefix so the keys match the diffusers module
            first_stage_dict[key.replace(first_stage_key, '')] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, '')] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    # `num_train_timesteps` is the DDIMScheduler argument name; the LDM config stores it as `timesteps`
    noise_scheduler = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps,
        beta_schedule='scaled_linear',
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )
    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument("--config_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
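
# Example invocation (hedged; the script name and paths are placeholders):
#
#     python conversion_ldm_uncond.py \
#         --checkpoint_path ldm/model.ckpt \
#         --config_path ldm/config.yaml \
#         --output_path ./ldm-pipeline
#
# The saved folder can then be reloaded with `LDMPipeline.from_pretrained("./ldm-pipeline")`.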
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    '''Return all overlapping character n-grams of `sentence` of length `ngram_size`.'''
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
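
# Worked example: create_ngram("I am a boy", 3) yields the 8 overlapping character
# trigrams ['I a', ' am', 'am ', 'm a', ' a ', 'a b', ' bo', 'boy'].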
if __name__ == "__main__":
from doctest import testmod
testmod()
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
@register_to_config
def __init__( self : List[str] , snake_case__ : int = 3_2 , snake_case__ : int = 6_4 , snake_case__ : int = 2_0 , snake_case__ : int = 7_6_8 , snake_case__ : Union[str, Any]=7_7 , snake_case__ : str=4 , snake_case__ : float = 0.0 , snake_case__ : str = "silu" , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = "linear" , snake_case__ : Optional[str] = "prd" , snake_case__ : Optional[int] = None , snake_case__ : Optional[int] = None , snake_case__ : Optional[int] = None , ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = attention_head_dim
SCREAMING_SNAKE_CASE = num_attention_heads * attention_head_dim
SCREAMING_SNAKE_CASE = additional_embeddings
SCREAMING_SNAKE_CASE = time_embed_dim or inner_dim
SCREAMING_SNAKE_CASE = embedding_proj_dim or embedding_dim
SCREAMING_SNAKE_CASE = clip_embed_dim or embedding_dim
SCREAMING_SNAKE_CASE = Timesteps(snake_case__ , snake_case__ , 0 )
SCREAMING_SNAKE_CASE = TimestepEmbedding(snake_case__ , snake_case__ , out_dim=snake_case__ , act_fn=snake_case__ )
SCREAMING_SNAKE_CASE = nn.Linear(snake_case__ , snake_case__ )
if embedding_proj_norm_type is None:
SCREAMING_SNAKE_CASE = None
elif embedding_proj_norm_type == "layer":
SCREAMING_SNAKE_CASE = nn.LayerNorm(snake_case__ )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
SCREAMING_SNAKE_CASE = nn.Linear(snake_case__ , snake_case__ )
if encoder_hid_proj_type is None:
SCREAMING_SNAKE_CASE = None
elif encoder_hid_proj_type == "linear":
SCREAMING_SNAKE_CASE = nn.Linear(snake_case__ , snake_case__ )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , snake_case__ ) )
if added_emb_type == "prd":
SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , 1 , snake_case__ ) )
elif added_emb_type is None:
SCREAMING_SNAKE_CASE = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
SCREAMING_SNAKE_CASE = nn.ModuleList(
[
BasicTransformerBlock(
snake_case__ , snake_case__ , snake_case__ , dropout=snake_case__ , activation_fn='gelu' , attention_bias=snake_case__ , )
for d in range(snake_case__ )
] )
if norm_in_type == "layer":
SCREAMING_SNAKE_CASE = nn.LayerNorm(snake_case__ )
elif norm_in_type is None:
SCREAMING_SNAKE_CASE = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
SCREAMING_SNAKE_CASE = nn.LayerNorm(snake_case__ )
SCREAMING_SNAKE_CASE = nn.Linear(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 )
causal_attention_mask.triu_(1 )
SCREAMING_SNAKE_CASE = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , snake_case__ , persistent=snake_case__ )
SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , snake_case__ ) )
SCREAMING_SNAKE_CASE = nn.Parameter(torch.zeros(1 , snake_case__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
def fn_recursive_add_processors(snake_case__ : str , snake_case__ : torch.nn.Module , snake_case__ : Dict[str, AttentionProcessor] ):
if hasattr(snake_case__ , 'set_processor' ):
SCREAMING_SNAKE_CASE = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" , snake_case__ , snake_case__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(snake_case__ , snake_case__ , snake_case__ )
return processors
def UpperCamelCase ( self : Dict , snake_case__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = len(self.attn_processors.keys() )
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(snake_case__ )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(snake_case__ : str , snake_case__ : torch.nn.Module , snake_case__ : List[str] ):
if hasattr(snake_case__ , 'set_processor' ):
if not isinstance(snake_case__ , snake_case__ ):
module.set_processor(snake_case__ )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" , snake_case__ , snake_case__ )
for name, module in self.named_children():
fn_recursive_attn_processor(snake_case__ , snake_case__ , snake_case__ )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
def UpperCamelCase ( self : Any , snake_case__ : str , snake_case__ : Union[torch.Tensor, float, int] , snake_case__ : torch.FloatTensor , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.BoolTensor] = None , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = hidden_states.shape[0]
SCREAMING_SNAKE_CASE = timestep
if not torch.is_tensor(snake_case__ ):
SCREAMING_SNAKE_CASE = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(snake_case__ ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE = timesteps * torch.ones(snake_case__ , dtype=timesteps.dtype , device=timesteps.device )
SCREAMING_SNAKE_CASE = self.time_proj(snake_case__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
SCREAMING_SNAKE_CASE = timesteps_projected.to(dtype=self.dtype )
SCREAMING_SNAKE_CASE = self.time_embedding(snake_case__ )
if self.embedding_proj_norm is not None:
SCREAMING_SNAKE_CASE = self.embedding_proj_norm(snake_case__ )
SCREAMING_SNAKE_CASE = self.embedding_proj(snake_case__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
SCREAMING_SNAKE_CASE = self.encoder_hidden_states_proj(snake_case__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
SCREAMING_SNAKE_CASE = self.proj_in(snake_case__ )
SCREAMING_SNAKE_CASE = self.positional_embedding.to(hidden_states.dtype )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
if encoder_hidden_states is not None:
additional_embeds.append(snake_case__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
SCREAMING_SNAKE_CASE = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
SCREAMING_SNAKE_CASE = hidden_states[:, None, :]
SCREAMING_SNAKE_CASE = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
SCREAMING_SNAKE_CASE = self.prd_embedding.to(hidden_states.dtype ).expand(snake_case__ , -1 , -1 )
additional_embeds.append(snake_case__ )
SCREAMING_SNAKE_CASE = torch.cat(
snake_case__ , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
SCREAMING_SNAKE_CASE = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
SCREAMING_SNAKE_CASE = F.pad(
snake_case__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
SCREAMING_SNAKE_CASE = hidden_states + positional_embeddings
if attention_mask is not None:
SCREAMING_SNAKE_CASE = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0
SCREAMING_SNAKE_CASE = F.pad(snake_case__ , (0, self.additional_embeddings) , value=0.0 )
SCREAMING_SNAKE_CASE = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
SCREAMING_SNAKE_CASE = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
SCREAMING_SNAKE_CASE = self.norm_in(snake_case__ )
for block in self.transformer_blocks:
SCREAMING_SNAKE_CASE = block(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = self.norm_out(snake_case__ )
if self.prd_embedding is not None:
SCREAMING_SNAKE_CASE = hidden_states[:, -1]
else:
SCREAMING_SNAKE_CASE = hidden_states[:, additional_embeddings_len:]
SCREAMING_SNAKE_CASE = self.proj_to_clip_embeddings(snake_case__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=snake_case__ )
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
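
# Hedged smoke-test sketch (assumes the upstream `diffusers.models.PriorTransformer`
# API that this file mirrors; the tiny dims are chosen so the token count matches
# num_embeddings + additional_embeddings = 4 + 4):
#
#     import torch
#     from diffusers.models import PriorTransformer
#
#     prior = PriorTransformer(num_attention_heads=2, attention_head_dim=4,
#                              num_layers=2, embedding_dim=8, num_embeddings=4)
#     out = prior(hidden_states=torch.randn(1, 8), timestep=10,
#                 proj_embedding=torch.randn(1, 8),
#                 encoder_hidden_states=torch.randn(1, 4, 8))
#     out.predicted_image_embedding.shape  # torch.Size([1, 8])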
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int64)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
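
# Hedged usage sketch (assumes an OWL-ViT checkpoint; the model is downloaded on
# first use):
#
#     from transformers import pipeline
#
#     detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]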
def naive_pattern_search(s: str, pattern: str) -> list:
    '''Return every index where `pattern` occurs in `s` (naive scan).'''
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
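
# Additional hedged check: overlapping matches are all reported, e.g.
# naive_pattern_search("AAAA", "AA") == [0, 1, 2].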
def solution(power: int = 1000) -> int:
    '''Return the sum of the decimal digits of 2**power.'''
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
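
# Quick hand check: solution(15) == 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.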
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
print("Sum of the digits is: ", result)
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =BertJapaneseTokenizer
__UpperCamelCase =False
__UpperCamelCase =True
def UpperCamelCase ( self : int ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCamelCase ( self : List[str] , snake_case__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'こんにちは、世界。 \nこんばんは、世界。'
SCREAMING_SNAKE_CASE = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_input_output_texts(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.decode(snake_case__ , clean_up_tokenization_spaces=snake_case__ )
return text, ids
def UpperCamelCase ( self : str ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(snake_case__ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(snake_case__ )
SCREAMING_SNAKE_CASE = 'こんにちは、世界。\nこんばんは、世界。'
SCREAMING_SNAKE_CASE = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(snake_case__ , 'wb' ) as handle:
pickle.dump(snake_case__ , snake_case__ )
with open(snake_case__ , 'rb' ) as handle:
SCREAMING_SNAKE_CASE = pickle.load(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
try:
SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
try:
SCREAMING_SNAKE_CASE = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MecabTokenizer(do_lower_case=snake_case__ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
try:
SCREAMING_SNAKE_CASE = MecabTokenizer(
do_lower_case=snake_case__ , normalize_text=snake_case__ , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MecabTokenizer(normalize_text=snake_case__ , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(snake_case__ )
SCREAMING_SNAKE_CASE = 'こんにちは、世界。\nこんばんは、世界。'
SCREAMING_SNAKE_CASE = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(snake_case__ , 'wb' ) as handle:
pickle.dump(snake_case__ , snake_case__ )
with open(snake_case__ , 'rb' ) as handle:
SCREAMING_SNAKE_CASE = pickle.load(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
@require_sudachi
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = SudachiTokenizer(do_lower_case=snake_case__ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = SudachiTokenizer(normalize_text=snake_case__ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = SudachiTokenizer(trim_whitespace=snake_case__ , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(snake_case__ )
SCREAMING_SNAKE_CASE = 'こんにちは、世界。\nこんばんは、世界。'
SCREAMING_SNAKE_CASE = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(snake_case__ , 'wb' ) as handle:
pickle.dump(snake_case__ , snake_case__ )
with open(snake_case__ , 'rb' ) as handle:
SCREAMING_SNAKE_CASE = pickle.load(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_new.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
@require_jumanpp
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = JumanppTokenizer(do_lower_case=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = JumanppTokenizer(normalize_text=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = JumanppTokenizer(trim_whitespace=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE = i
SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=snake_case__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
SCREAMING_SNAKE_CASE = tokenizer.subword_tokenizer
SCREAMING_SNAKE_CASE = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(snake_case__ , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
SCREAMING_SNAKE_CASE = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(snake_case__ , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
SCREAMING_SNAKE_CASE = tokenizer.encode('ありがとう。' , add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.encode('どういたしまして。' , add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =BertJapaneseTokenizer
__UpperCamelCase =False
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCamelCase ( self : str , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **snake_case__ )
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'こんにちは、世界。 \nこんばんは、世界。'
SCREAMING_SNAKE_CASE = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
pass # TODO add if relevant
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
snake_case__ , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE = i
SCREAMING_SNAKE_CASE = CharacterTokenizer(vocab=snake_case__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
SCREAMING_SNAKE_CASE = tokenizer.encode('ありがとう。' , add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.encode('どういたしまして。' , add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cl-tohoku/bert-base-japanese'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(snake_case__ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
SCREAMING_SNAKE_CASE = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(snake_case__ )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
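
# Hedged usage sketch (requires the fugashi/ipadic extras for the default MeCab
# word tokenizer; the checkpoint is downloaded on first use):
#
#     from transformers import BertJapaneseTokenizer
#
#     tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
#     print(tokenizer.tokenize("こんにちは、世界。"))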
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length")

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
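
# Hedged usage sketch (downloads facebook/bart-large-mnli on first use):
#
#     tool = TextClassificationTool()
#     print(tool("This new API is a joy to use!", labels=["positive", "negative"]))
#     # expected: "positive"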
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : Tuple = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"
    def __init__(self, vocab_size=100000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_scaling=None, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # keep the historical (misspelled) kwarg working as an alias
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ : List[Any] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a_ : Any = "allenai"
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add the word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
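
# Worked example: special tokens are restored without the "</w>" suffix while BPE
# continuations lose their "@@" marker:
#
#     rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 4, "er": 5})
#     # -> {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 4, "er</w>": 5}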
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs)
    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)
# model config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
SCREAMING_SNAKE_CASE = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE = 5
SCREAMING_SNAKE_CASE = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE = best_score_hparams[model_dir]['length_penalty']
else:
SCREAMING_SNAKE_CASE = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# model
SCREAMING_SNAKE_CASE = chkpt['models'][0]
SCREAMING_SNAKE_CASE = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration(_UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
# save
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_UpperCamelCase , _UpperCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ : int = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 673 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : Optional[Any] = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
a_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 673 |
import random
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : float , _UpperCamelCase : bool = False ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {i: [] for i in range(_UpperCamelCase )}
    # if probability is greater than or equal to 1, then generate a complete graph
if probability >= 1:
return complete_graph(_UpperCamelCase )
    # if probability is less than or equal to 0, then return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # if the randomly generated number is smaller than probability
for i in range(_UpperCamelCase ):
for j in range(i + 1 , _UpperCamelCase ):
if random.random() < probability:
graph[i].append(_UpperCamelCase )
if not directed:
                    # if the graph is undirected, add an edge from j to i as well
graph[j].append(_UpperCamelCase )
return graph
def __lowerCAmelCase ( _UpperCamelCase : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(_UpperCamelCase ) if i != j] for i in range(_UpperCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
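    # Minimal usage sketch (illustrative addition): for n = 3 the complete-graph
    # builder above yields the adjacency map constructed inline here.
    print({i: [j for j in range(3) if i != j] for i in range(3)})
    # -> {0: [1, 2], 1: [0, 2], 2: [0, 1]}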
| 673 | 1 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
a_ : Any = {
"n_samples": 64,
"horizon": 32,
"num_inference_steps": 20,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
a_ : str = "hopper-medium-v2"
a_ : Tuple = gym.make(env_name)
a_ : str = ValueGuidedRLPipeline.from_pretrained(
"bglick13/hopper-medium-v2-value-function-hor32",
env=env,
)
env.seed(0)
a_ : int = env.reset()
a_ : Dict = 0
a_ : Dict = 0
a_ : int = 1000
a_ : Union[str, Any] = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
a_ : int = pipeline(obs, planning_horizon=32)
# execute action in environment
a_ , a_ , a_ , a_ : List[str] = env.step(denorm_actions)
a_ : Optional[int] = env.get_normalized_score(total_reward)
            # update the running return and score
total_reward += reward
total_score += score
print(
F"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
F""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
a_ : Dict = next_observation
except KeyboardInterrupt:
pass
print(F"""Total reward: {total_reward}""")
| 673 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any]=1_3 , snake_case__ : Union[str, Any]=7 , snake_case__ : List[str]=True , snake_case__ : Any=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=9_9 , snake_case__ : str=3_2 , snake_case__ : Dict=5 , snake_case__ : str=4 , snake_case__ : int=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : List[Any]=1_6 , snake_case__ : str=2 , snake_case__ : int=0.02 , snake_case__ : List[str]=3 , snake_case__ : Dict=4 , snake_case__ : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'the [MASK] of Belgium is Brussels'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) , 'capital' )
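if __name__ == "__main__":
    # Standalone sketch (illustrative addition) mirroring the slow masked-LM test
    # above; it downloads the public uw-madison/nystromformer-512 checkpoint, so
    # it needs network access and a working torch install.
    if is_torch_available():
        tokenizer = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512')
        model = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512')
        encoding = tokenizer('the [MASK] of Belgium is Brussels', return_tensors='pt')
        with torch.no_grad():
            logits = model(encoding.input_ids).logits
        print(tokenizer.decode(logits[:, 2, :].argmax(-1)[0]))  # expected: capital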
| 673 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : Dict = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
a_ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 673 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 673 | 1 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCamelCase :
def __init__( self : List[str] , snake_case__ : Optional[Any] , snake_case__ : Any=1_3 , snake_case__ : Tuple=7 , snake_case__ : Tuple=True , snake_case__ : int=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=True , snake_case__ : Optional[Any]=9_9 , snake_case__ : List[str]=3_2 , snake_case__ : int=2 , snake_case__ : Optional[int]=4 , snake_case__ : Any=3_7 , snake_case__ : Any="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : Any=0.1 , snake_case__ : Dict=5_1_2 , snake_case__ : Tuple=1_6 , snake_case__ : int=2 , snake_case__ : int=0.02 , snake_case__ : Optional[Any]=3 , snake_case__ : Optional[Any]=4 , snake_case__ : Optional[Any]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = 1_3
SCREAMING_SNAKE_CASE = 7
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = 9_9
SCREAMING_SNAKE_CASE = 3_2
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = 3_7
SCREAMING_SNAKE_CASE = 'gelu'
SCREAMING_SNAKE_CASE = 0.1
SCREAMING_SNAKE_CASE = 0.1
SCREAMING_SNAKE_CASE = 5_1_2
SCREAMING_SNAKE_CASE = 1_6
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 0.02
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFRoFormerModel(config=snake_case__ )
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE = [input_ids, input_mask]
SCREAMING_SNAKE_CASE = model(snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Any , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = TFRoFormerForCausalLM(config=snake_case__ )
SCREAMING_SNAKE_CASE = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
SCREAMING_SNAKE_CASE = model(snake_case__ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def UpperCamelCase ( self : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM(config=snake_case__ )
SCREAMING_SNAKE_CASE = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = TFRoFormerForSequenceClassification(config=snake_case__ )
SCREAMING_SNAKE_CASE = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : str , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = TFRoFormerForMultipleChoice(config=snake_case__ )
SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = TFRoFormerForTokenClassification(config=snake_case__ )
SCREAMING_SNAKE_CASE = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : Any , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFRoFormerForQuestionAnswering(config=snake_case__ )
SCREAMING_SNAKE_CASE = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
            SCREAMING_SNAKE_CASE,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : Dict ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFRoFormerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(snake_case__ )
@require_tf
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
# TODO Replace vocab size
SCREAMING_SNAKE_CASE = 5_0_0_0_0
SCREAMING_SNAKE_CASE = [1, 6, vocab_size]
self.assertEqual(output.shape , snake_case__ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
SCREAMING_SNAKE_CASE = tf.constant(
[
[
[-0.12_053_341, -1.0_264_901, 0.29_221_946],
[-1.5_133_783, 0.197_433, 0.15_190_607],
[-5.0_135_403, -3.900_256, -0.84_038_764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case__ , atol=1E-4 )
@require_tf
class UpperCamelCase ( unittest.TestCase ):
__UpperCamelCase =1e-4
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = tf.constant([[4, 1_0]] )
SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
SCREAMING_SNAKE_CASE = emba(input_ids.shape )
SCREAMING_SNAKE_CASE = tf.constant(
[[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] )
tf.debugging.assert_near(snake_case__ , snake_case__ , atol=self.tolerance )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000],
[0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617],
[0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870],
] )
SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
emba([2, 1_6, 5_1_2] )
SCREAMING_SNAKE_CASE = emba.weight[:3, :5]
tf.debugging.assert_near(snake_case__ , snake_case__ , atol=self.tolerance )
@require_tf
class UpperCamelCase ( unittest.TestCase ):
__UpperCamelCase =1e-4
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
SCREAMING_SNAKE_CASE = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
SCREAMING_SNAKE_CASE = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , snake_case__ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , snake_case__ , atol=self.tolerance )
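def _rotary_sketch(x, sin, cos):
    # Minimal sketch (illustrative addition, hypothetical helper; assumes `sin`
    # and `cos` hold one value per feature pair): the RoPE rotation verified
    # above maps each (even, odd) feature pair (x1, x2) of a query/key tensor
    # to (x1 * cos - x2 * sin, x2 * cos + x1 * sin).
    x1, x2 = x[..., 0::2], x[..., 1::2]
    rotated = tf.stack([x1 * cos - x2 * sin, x2 * cos + x1 * sin], axis=-1)
    return tf.reshape(rotated, tf.shape(x))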
| 673 |
import heapq
import sys
import numpy as np
a_ : Optional[int] = tuple[int, int]
class UpperCamelCase :
def __init__( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.elements ) == 0
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(snake_case__ )
else:
# update
# print("update", item)
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
if item in self.set:
self.set.remove(snake_case__ )
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
return self.elements[0][1]
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
self.set.remove(snake_case__ )
return (priority, item)
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
return np.linalg.norm(a - b )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Dict:
'''simple docstring'''
return consistent_heuristic(_UpperCamelCase , _UpperCamelCase ) // t
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[int]:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
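# Worked example (illustrative addition): for p = (1, 2) and goal = (4, 4),
# the Euclidean (consistent) heuristic gives sqrt(3**2 + 2**2) ~= 3.61, while
# the Manhattan heuristic above gives |1 - 4| + |2 - 4| = 5.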
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : int , _UpperCamelCase : TPos , _UpperCamelCase : dict[TPos, float] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = g_function[start] + Wa * heuristics[i](_UpperCamelCase , _UpperCamelCase )
return ans
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.chararray((n, n) )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = '*'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (j, (n - 1) - i) in blocks:
SCREAMING_SNAKE_CASE = '#'
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = x
# print(x)
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[x]
SCREAMING_SNAKE_CASE = '-'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
print(_UpperCamelCase , end=' ' )
SCREAMING_SNAKE_CASE = back_pointer[x]
print(_UpperCamelCase )
sys.exit()
def __lowerCAmelCase ( _UpperCamelCase : TPos ) -> Any:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , ) -> List[Any]:
'''simple docstring'''
for itera in range(_UpperCamelCase ):
open_list[itera].remove_element(_UpperCamelCase )
# print("s", s)
# print("j", j)
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = s
SCREAMING_SNAKE_CASE = (x - 1, y)
SCREAMING_SNAKE_CASE = (x + 1, y)
SCREAMING_SNAKE_CASE = (x, y + 1)
SCREAMING_SNAKE_CASE = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = float('inf' )
if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
SCREAMING_SNAKE_CASE = g_function[s] + 1
SCREAMING_SNAKE_CASE = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCamelCase ):
if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ):
open_list[j].put(
_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a_ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ : List[str] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a_ : Union[str, Any] = make_common_ground()
a_ : Tuple = blocks_blk
# hyper parameters
a_ : Any = 1
a_ : List[str] = 1
a_ : Union[str, Any] = 20
a_ : Optional[Any] = 3  # one consistent heuristic and two inconsistent ones
# start and end destination
a_ : int = (0, 0)
a_ : Optional[int] = (n - 1, n - 1)
a_ : Union[str, Any] = 1
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos , _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {start: 0, goal: float('inf' )}
SCREAMING_SNAKE_CASE = {start: -1, goal: -1}
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
for i in range(_UpperCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , _UpperCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = open_list[i].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_inad.append(_UpperCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = open_list[0].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_anchor.append(_UpperCamelCase )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 673 | 1 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a_ : Dict = logging.get_logger(__name__)
class UpperCamelCase ( enum.Enum ):
__UpperCamelCase =0
__UpperCamelCase =1
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="generated"
def __init__( self : int , *snake_case__ : List[str] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
super().__init__(*snake_case__ , **snake_case__ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Any=None , snake_case__ : List[Any]=None , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[Any]=None , **snake_case__ : Any , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
if truncation is not None:
SCREAMING_SNAKE_CASE = truncation
SCREAMING_SNAKE_CASE = generate_kwargs
SCREAMING_SNAKE_CASE = {}
if return_tensors is not None and return_type is None:
SCREAMING_SNAKE_CASE = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
SCREAMING_SNAKE_CASE = return_type
if clean_up_tokenization_spaces is not None:
SCREAMING_SNAKE_CASE = clean_up_tokenization_spaces
if stop_sequence is not None:
SCREAMING_SNAKE_CASE = self.tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
if len(snake_case__ ) > 1:
warnings.warn(
                    'Stopping on a multiple-token sequence is not yet supported in transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
SCREAMING_SNAKE_CASE = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase ( self : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
return True
def UpperCamelCase ( self : Union[str, Any] , *snake_case__ : Tuple , snake_case__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model.config.prefix if self.model.config.prefix is not None else ''
if isinstance(args[0] , snake_case__ ):
if self.tokenizer.pad_token_id is None:
raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' )
SCREAMING_SNAKE_CASE = ([prefix + arg for arg in args[0]],)
SCREAMING_SNAKE_CASE = True
elif isinstance(args[0] , snake_case__ ):
SCREAMING_SNAKE_CASE = (prefix + args[0],)
SCREAMING_SNAKE_CASE = False
else:
raise ValueError(
F""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
SCREAMING_SNAKE_CASE = self.tokenizer(*snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwarg
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : List[str] , *snake_case__ : Any , **snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super().__call__(*snake_case__ , **snake_case__ )
if (
isinstance(args[0] , snake_case__ )
and all(isinstance(snake_case__ , snake_case__ ) for el in args[0] )
and all(len(snake_case__ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCamelCase ( self : List[str] , snake_case__ : Tuple , snake_case__ : Optional[Any]=TruncationStrategy.DO_NOT_TRUNCATE , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._parse_and_tokenize(snake_case__ , truncation=snake_case__ , **snake_case__ )
return inputs
def UpperCamelCase ( self : str , snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
if self.framework == "pt":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model_inputs['input_ids'].shape
elif self.framework == "tf":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = tf.shape(model_inputs['input_ids'] ).numpy()
SCREAMING_SNAKE_CASE = generate_kwargs.get('min_length' , self.model.config.min_length )
SCREAMING_SNAKE_CASE = generate_kwargs.get('max_length' , self.model.config.max_length )
self.check_inputs(snake_case__ , generate_kwargs['min_length'] , generate_kwargs['max_length'] )
SCREAMING_SNAKE_CASE = self.model.generate(**snake_case__ , **snake_case__ )
SCREAMING_SNAKE_CASE = output_ids.shape[0]
if self.framework == "pt":
SCREAMING_SNAKE_CASE = output_ids.reshape(snake_case__ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE = tf.reshape(snake_case__ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCamelCase ( self : Optional[int] , snake_case__ : int , snake_case__ : Tuple=ReturnType.TEXT , snake_case__ : Union[str, Any]=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
SCREAMING_SNAKE_CASE = {F"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
SCREAMING_SNAKE_CASE = {
F"""{self.return_name}_text""": self.tokenizer.decode(
snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ , )
}
records.append(snake_case__ )
return records
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="summary"
def __call__( self : Union[str, Any] , *snake_case__ : int , **snake_case__ : Optional[int] ):
"""simple docstring"""
return super().__call__(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
if max_length < min_length:
logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'a summarization task, where outputs shorter than the input are typically wanted, you might '
F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="translation"
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' )
return True
def UpperCamelCase ( self : List[Any] , *snake_case__ : List[Any] , snake_case__ : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , snake_case__ : List[str]=None , snake_case__ : Any=None ):
"""simple docstring"""
if getattr(self.tokenizer , '_build_translation_inputs' , snake_case__ ):
return self.tokenizer._build_translation_inputs(
*snake_case__ , return_tensors=self.framework , truncation=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ )
else:
return super()._parse_and_tokenize(*snake_case__ , truncation=snake_case__ )
def UpperCamelCase ( self : str , snake_case__ : List[Any]=None , snake_case__ : Optional[int]=None , **snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = super()._sanitize_parameters(**snake_case__ )
if src_lang is not None:
SCREAMING_SNAKE_CASE = src_lang
if tgt_lang is not None:
SCREAMING_SNAKE_CASE = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
SCREAMING_SNAKE_CASE = kwargs.get('task' , self.task )
SCREAMING_SNAKE_CASE = task.split('_' )
if task and len(snake_case__ ) == 4:
# translation, XX, to YY
SCREAMING_SNAKE_CASE = items[1]
SCREAMING_SNAKE_CASE = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : Any , *snake_case__ : str , **snake_case__ : List[str] ):
"""simple docstring"""
return super().__call__(*snake_case__ , **snake_case__ )
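if __name__ == "__main__":
    # Illustrative sketch (added): the backward-compatibility branch above derives
    # language codes by splitting a task name of the form 'translation_XX_to_YY'.
    items = 'translation_en_to_de'.split('_')
    assert len(items) == 4 and (items[1], items[3]) == ('en', 'de')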
| 673 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : str = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Any=8 ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
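# Worked examples for the helper above (a sketch, assuming the default
# scale_factor=8): downscale_height_and_width(768, 768) computes
# 768 // 64 == 12 with no remainder and returns (96, 96), the latent-space
# size of a 768x768 image; for 800 the remainder bumps 12 up to 13, giving
# 104, so the latent grid is rounded up to fully cover the pixels.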
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=5_12 , _UpperCamelCase : Union[str, Any]=5_12 ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
SCREAMING_SNAKE_CASE = np.array(pil_image.convert('RGB' ) )
SCREAMING_SNAKE_CASE = arr.astype(np.floataa ) / 1_27.5 - 1
SCREAMING_SNAKE_CASE = np.transpose(_UpperCamelCase , [2, 0, 1] )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
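# For a 512x512 RGB input, prepare_image above yields a float tensor of shape
# (1, 3, 512, 512) with pixel values rescaled from [0, 255] to [-1, 1], the
# range the movq encoder expects.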
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : int , snake_case__ : UNetaDConditionModel , snake_case__ : DDPMScheduler , snake_case__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=snake_case__ , scheduler=snake_case__ , movq=snake_case__ , )
SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase ( self : Any , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = min(int(num_inference_steps * strength ) , snake_case__ )
SCREAMING_SNAKE_CASE = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
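    # Worked example (a sketch): with num_inference_steps=100 and strength=0.2,
    # init_timestep = min(int(100 * 0.2), 100) == 20 and t_start == 80, so only
    # the last 20 scheduler timesteps are run; the img2img pass denoises for
    # roughly strength * num_inference_steps steps.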
def UpperCamelCase ( self : List[str] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str=None ):
"""simple docstring"""
if not isinstance(snake_case__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case__ )}""" )
SCREAMING_SNAKE_CASE = image.to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE = image
else:
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case__ )
]
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
else:
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ ).latent_dist.sample(snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE = init_latents.shape
SCREAMING_SNAKE_CASE = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ )
# get latents
SCREAMING_SNAKE_CASE = self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = init_latents
return latents
def UpperCamelCase ( self : int , snake_case__ : List[str]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
SCREAMING_SNAKE_CASE = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=snake_case__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cpu_offload_with_hook(snake_case__ , snake_case__ , prev_module_hook=snake_case__ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case__ )
def __call__( self : str , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : int = 5_1_2 , snake_case__ : int = 5_1_2 , snake_case__ : int = 1_0_0 , snake_case__ : float = 4.0 , snake_case__ : float = 0.3 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._execution_device
SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = image_embeds.shape[0]
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [image]
if not all(isinstance(snake_case__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(snake_case__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
SCREAMING_SNAKE_CASE = torch.cat([prepare_image(snake_case__ , snake_case__ , snake_case__ ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE = image.to(dtype=image_embeds.dtype , device=snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ )['latents']
SCREAMING_SNAKE_CASE = latents.repeat_interleave(snake_case__ , dim=0 )
self.scheduler.set_timesteps(snake_case__ , device=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_timesteps(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor )
SCREAMING_SNAKE_CASE = self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , image_embeds.dtype , snake_case__ , snake_case__ )
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE = {'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE = self.unet(
sample=snake_case__ , timestep=snake_case__ , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
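                # Classifier-free guidance: push the prediction away from the
                # unconditional branch along the text-conditioned direction,
                # scaled by guidance_scale.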
SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE = self.scheduler.step(
snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ , )[0]
# post-processing
SCREAMING_SNAKE_CASE = self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
| 673 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
SCREAMING_SNAKE_CASE = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
SCREAMING_SNAKE_CASE = {
'do_resize': True,
'size': 2_0,
'do_center_crop': True,
'crop_size': 1_8,
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , snake_case__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Tuple , **snake_case__ : int ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCamelCase ( self : int , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , **snake_case__ : Optional[int] ):
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case__ )
SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case__ )
self.assertIsInstance(processor_fast.tokenizer , snake_case__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case__ )
self.assertIsInstance(processor_fast.image_processor , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = image_processor(snake_case__ , return_tensors='np' )
SCREAMING_SNAKE_CASE = processor(images=snake_case__ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
SCREAMING_SNAKE_CASE = 'lower newer'
SCREAMING_SNAKE_CASE = processor(text=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
SCREAMING_SNAKE_CASE = 'lower newer'
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE = processor.batch_decode(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
SCREAMING_SNAKE_CASE = 'lower newer'
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
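    # Minimal usage sketch outside this test harness (the checkpoint name is
    # illustrative):
    # processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    # inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")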
| 673 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger("transformers.models.speecht5")
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
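# Note on the weight-norm dance above: apply_weight_norm() re-parameterizes
# each conv weight as weight = g * v / ||v||, matching the separate `weight_g`
# and `weight_v` tensors stored in the original checkpoint; remove_weight_norm()
# then folds them back into a single `weight` tensor once copying is done.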
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None , ) -> Tuple:
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig.from_pretrained(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = np.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
model.save_pretrained(_UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
a_ : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
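# Example invocation (a sketch; the script and file names are illustrative):
# python convert_hifigan.py \
#     --checkpoint_path generator.ckpt \
#     --stats_path stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan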
| 673 | 1 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def __lowerCAmelCase ( _UpperCamelCase : Any ) -> Tuple:
'''simple docstring'''
return x + 2
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'x = 3'
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = evaluate(snake_case__ , {} , state=snake_case__ )
assert result == 3
self.assertDictEqual(snake_case__ , {'x': 3} )
SCREAMING_SNAKE_CASE = 'x = y'
SCREAMING_SNAKE_CASE = {'y': 5}
SCREAMING_SNAKE_CASE = evaluate(snake_case__ , {} , state=snake_case__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(snake_case__ , {'x': 5, 'y': 5} )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'y = add_two(x)'
SCREAMING_SNAKE_CASE = {'x': 3}
SCREAMING_SNAKE_CASE = evaluate(snake_case__ , {'add_two': add_two} , state=snake_case__ )
assert result == 5
self.assertDictEqual(snake_case__ , {'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
SCREAMING_SNAKE_CASE = evaluate(snake_case__ , {} , state=snake_case__ )
assert result is None
assert "tried to execute add_two" in out.out
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'x = 3'
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = evaluate(snake_case__ , {} , state=snake_case__ )
assert result == 3
self.assertDictEqual(snake_case__ , {'x': 3} )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
SCREAMING_SNAKE_CASE = {'x': 3}
SCREAMING_SNAKE_CASE = evaluate(snake_case__ , {'add_two': add_two} , state=snake_case__ )
self.assertDictEqual(snake_case__ , {'x': 3, 'y': 5} )
self.assertDictEqual(snake_case__ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'x = 3\ny = 5'
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = evaluate(snake_case__ , {} , state=snake_case__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(snake_case__ , {'x': 3, 'y': 5} )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'text = f\'This is x: {x}.\''
SCREAMING_SNAKE_CASE = {'x': 3}
SCREAMING_SNAKE_CASE = evaluate(snake_case__ , {} , state=snake_case__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(snake_case__ , {'x': 3, 'text': 'This is x: 3.'} )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'if x <= 3:\n y = 2\nelse:\n y = 5'
SCREAMING_SNAKE_CASE = {'x': 3}
SCREAMING_SNAKE_CASE = evaluate(snake_case__ , {} , state=snake_case__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(snake_case__ , {'x': 3, 'y': 2} )
SCREAMING_SNAKE_CASE = {'x': 8}
SCREAMING_SNAKE_CASE = evaluate(snake_case__ , {} , state=snake_case__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(snake_case__ , {'x': 8, 'y': 5} )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'test_list = [x, add_two(x)]'
SCREAMING_SNAKE_CASE = {'x': 3}
SCREAMING_SNAKE_CASE = evaluate(snake_case__ , {'add_two': add_two} , state=snake_case__ )
self.assertListEqual(snake_case__ , [3, 5] )
self.assertDictEqual(snake_case__ , {'x': 3, 'test_list': [3, 5]} )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'y = x'
SCREAMING_SNAKE_CASE = {'x': 3}
SCREAMING_SNAKE_CASE = evaluate(snake_case__ , {} , state=snake_case__ )
assert result == 3
self.assertDictEqual(snake_case__ , {'x': 3, 'y': 3} )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'test_list = [x, add_two(x)]\ntest_list[1]'
SCREAMING_SNAKE_CASE = {'x': 3}
SCREAMING_SNAKE_CASE = evaluate(snake_case__ , {'add_two': add_two} , state=snake_case__ )
assert result == 5
self.assertDictEqual(snake_case__ , {'x': 3, 'test_list': [3, 5]} )
SCREAMING_SNAKE_CASE = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
SCREAMING_SNAKE_CASE = {'x': 3}
SCREAMING_SNAKE_CASE = evaluate(snake_case__ , {'add_two': add_two} , state=snake_case__ )
assert result == 5
self.assertDictEqual(snake_case__ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'x = 0\nfor i in range(3):\n x = i'
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = evaluate(snake_case__ , {'range': range} , state=snake_case__ )
assert result == 2
self.assertDictEqual(snake_case__ , {'x': 2, 'i': 2} )
| 673 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a_ : List[Any] = {
"allenai/led-base-16384": 1_6384,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =LEDTokenizer
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Dict="replace" , snake_case__ : Tuple="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : List[Any]=False , snake_case__ : int=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
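    # Resulting layouts (a sketch): a single sequence becomes
    #   <s> tokens </s>
    # and a pair becomes
    #   <s> tokens_a </s> </s> tokens_b </s>
    # following the RoBERTa/BART convention that LED inherits.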
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs['global_attention_mask'] ) != len(snake_case__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(snake_case__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 673 | 1 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
a_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def UpperCamelCase ( self : Dict , snake_case__ : Tuple ):
"""simple docstring"""
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [label.strip() for label in labels.split(',' ) if label.strip()]
return labels
def __call__( self : List[Any] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Dict ):
"""simple docstring"""
if len(snake_case__ ) == 0 or len(snake_case__ ) == 0:
raise ValueError('You must include at least one label and at least one sequence.' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
).format(snake_case__ ) )
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [sequences]
SCREAMING_SNAKE_CASE = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(snake_case__ )] for label in labels] )
return sequence_pairs, sequences
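    # Example (a sketch): sequences="Who are you voting for?" with
    # labels=["politics", "economics"] and the default template
    # "This example is {}." yields sequence_pairs ==
    #   [["Who are you voting for?", "This example is politics."],
    #    ["Who are you voting for?", "This example is economics."]]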
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : List[str] , snake_case__ : List[Any]=ZeroShotClassificationArgumentHandler() , *snake_case__ : Optional[int] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = args_parser
super().__init__(*snake_case__ , **snake_case__ )
if self.entailment_id == -1:
logger.warning(
'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
'-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.' )
@property
def UpperCamelCase ( self : str ):
"""simple docstring"""
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('entail' ):
return ind
return -1
def UpperCamelCase ( self : int , snake_case__ : Union[str, Any] , snake_case__ : int=True , snake_case__ : Tuple=True , snake_case__ : str=TruncationStrategy.ONLY_FIRST , **snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
            logger.error(
                'Tokenizer does not support the padding necessary for zero-shot classification; '
                'attempting to fall back to `pad_token=eos_token`' )
SCREAMING_SNAKE_CASE = self.tokenizer.eos_token
try:
SCREAMING_SNAKE_CASE = self.tokenizer(
snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , padding=snake_case__ , truncation=snake_case__ , )
except Exception as e:
if "too short" in str(snake_case__ ):
                # The tokenizer might complain that we want to truncate to a
                # value the input never even reaches. In that case we don't
                # want to truncate at all; there doesn't seem to be a cleaner
                # way to detect this than catching the exception.
SCREAMING_SNAKE_CASE = self.tokenizer(
snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , padding=snake_case__ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def UpperCamelCase ( self : Tuple , **snake_case__ : Any ):
"""simple docstring"""
if kwargs.get('multi_class' , snake_case__ ) is not None:
SCREAMING_SNAKE_CASE = kwargs['multi_class']
logger.warning(
'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
'`multi_class` will be removed in a future version of Transformers.' )
SCREAMING_SNAKE_CASE = {}
if "candidate_labels" in kwargs:
SCREAMING_SNAKE_CASE = self._args_parser._parse_labels(kwargs['candidate_labels'] )
if "hypothesis_template" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['hypothesis_template']
SCREAMING_SNAKE_CASE = {}
if "multi_label" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['multi_label']
return preprocess_params, {}, postprocess_params
def __call__( self : Tuple , snake_case__ : Union[str, List[str]] , *snake_case__ : int , **snake_case__ : int , ):
"""simple docstring"""
if len(snake_case__ ) == 0:
pass
elif len(snake_case__ ) == 1 and "candidate_labels" not in kwargs:
SCREAMING_SNAKE_CASE = args[0]
else:
raise ValueError(F"""Unable to understand extra arguments {args}""" )
return super().__call__(snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Tuple , snake_case__ : Any , snake_case__ : Union[str, Any]=None , snake_case__ : str="This example is {}." ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._args_parser(snake_case__ , snake_case__ , snake_case__ )
for i, (candidate_label, sequence_pair) in enumerate(zip(snake_case__ , snake_case__ ) ):
SCREAMING_SNAKE_CASE = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(snake_case__ ) - 1,
**model_input,
}
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = inputs['candidate_label']
SCREAMING_SNAKE_CASE = inputs['sequence']
SCREAMING_SNAKE_CASE = {k: inputs[k] for k in self.tokenizer.model_input_names}
SCREAMING_SNAKE_CASE = self.model(**snake_case__ )
SCREAMING_SNAKE_CASE = {
'candidate_label': candidate_label,
'sequence': sequence,
'is_last': inputs['is_last'],
**outputs,
}
return model_outputs
def UpperCamelCase ( self : Optional[int] , snake_case__ : Tuple , snake_case__ : Tuple=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [outputs['candidate_label'] for outputs in model_outputs]
SCREAMING_SNAKE_CASE = [outputs['sequence'] for outputs in model_outputs]
SCREAMING_SNAKE_CASE = np.concatenate([output['logits'].numpy() for output in model_outputs] )
SCREAMING_SNAKE_CASE = logits.shape[0]
SCREAMING_SNAKE_CASE = len(snake_case__ )
SCREAMING_SNAKE_CASE = N // n
SCREAMING_SNAKE_CASE = logits.reshape((num_sequences, n, -1) )
if multi_label or len(snake_case__ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
SCREAMING_SNAKE_CASE = self.entailment_id
SCREAMING_SNAKE_CASE = -1 if entailment_id == 0 else 0
SCREAMING_SNAKE_CASE = reshaped_outputs[..., [contradiction_id, entailment_id]]
SCREAMING_SNAKE_CASE = np.exp(snake_case__ ) / np.exp(snake_case__ ).sum(-1 , keepdims=snake_case__ )
SCREAMING_SNAKE_CASE = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
SCREAMING_SNAKE_CASE = reshaped_outputs[..., self.entailment_id]
SCREAMING_SNAKE_CASE = np.exp(snake_case__ ) / np.exp(snake_case__ ).sum(-1 , keepdims=snake_case__ )
SCREAMING_SNAKE_CASE = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
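# Minimal usage sketch (the checkpoint name is illustrative, though it is the
# usual choice for zero-shot classification):
# from transformers import pipeline
# classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
# classifier("one day I will see the world", candidate_labels=["travel", "cooking"])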
| 673 |
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run, first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have network issues; you can try to debug them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(_UpperCamelCase , 'r' ) as fh:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX )
try:
print(*_UpperCamelCase )
finally:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN )
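# printflock serializes prints across ranks by taking an exclusive flock on a
# file (the upstream script locks on __file__ itself), so output lines from
# concurrent processes don't interleave.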
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a_ : Dict = dist.get_rank()
a_ : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 673 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
a_ : Dict = False
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : int , snake_case__ : List[Any]=3_2 ):
"""simple docstring"""
set_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDModel(sample_size=snake_case__ , in_channels=3 , out_channels=3 )
SCREAMING_SNAKE_CASE = torch.optim.SGD(model.parameters() , lr=0.0_001 )
return model, optimizer
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
SCREAMING_SNAKE_CASE = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=snake_case__ , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=snake_case__ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
SCREAMING_SNAKE_CASE = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(snake_case__ ) for _ in range(4 )]
SCREAMING_SNAKE_CASE = [torch.randn((4, 3, 3_2, 3_2) ).to(snake_case__ ) for _ in range(4 )]
SCREAMING_SNAKE_CASE = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(snake_case__ ) for _ in range(4 )]
# train with a DDPM scheduler
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_model_optimizer(resolution=3_2 )
model.train().to(snake_case__ )
for i in range(4 ):
optimizer.zero_grad()
SCREAMING_SNAKE_CASE = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
SCREAMING_SNAKE_CASE = model(snake_case__ , timesteps[i] ).sample
SCREAMING_SNAKE_CASE = torch.nn.functional.mse_loss(snake_case__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_model_optimizer(resolution=3_2 )
model.train().to(snake_case__ )
for i in range(4 ):
optimizer.zero_grad()
SCREAMING_SNAKE_CASE = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
SCREAMING_SNAKE_CASE = model(snake_case__ , timesteps[i] ).sample
SCREAMING_SNAKE_CASE = torch.nn.functional.mse_loss(snake_case__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
| 673 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
a_ : Union[str, Any] = {"allegro/herbert-base-cased": 514}
a_ : List[Any] = {}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =HerbertTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : List[str]="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
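    # Resulting layouts (a sketch): a single sequence becomes
    #   <s> tokens </s>
    # and a pair becomes
    #   <s> tokens_a </s> tokens_b </s>
    # (note that HerBERT uses a single separator between pair segments,
    # unlike RoBERTa's doubled </s>).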
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> int:
'''simple docstring'''
while a != 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = b % a, a
return b
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> int:
'''simple docstring'''
if gcd(_UpperCamelCase , _UpperCamelCase ) != 1:
SCREAMING_SNAKE_CASE = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(_UpperCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1, 0, a
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0, 1, m
while va != 0:
SCREAMING_SNAKE_CASE = ua // va
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
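# Worked example (a sketch): the second helper above implements the extended
# Euclidean algorithm, so for a=3, m=11 it returns 4, since gcd(3, 11) == 1
# and (3 * 4) % 11 == 1.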
| 673 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
while n > 0:
res += n % 10
n //= 10
return res
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
return sum(int(_UpperCamelCase ) for c in str(abs(_UpperCamelCase ) ) )
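# All three implementations above agree; for example, for n = 262144 each
# returns 2 + 6 + 2 + 1 + 4 + 4 == 19.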
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCamelCase : Callable , _UpperCamelCase : int ) -> None:
SCREAMING_SNAKE_CASE = f"""{func.__name__}({value})"""
SCREAMING_SNAKE_CASE = timeit(f"""__main__.{call}""" , setup='import __main__' )
print(f"""{call:56} = {func(_UpperCamelCase )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_UpperCamelCase , _UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 673 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCamelCase ( unittest.TestCase ):
__UpperCamelCase =MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCamelCase =TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def UpperCamelCase ( self : str , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TextaTextGenerationPipeline(model=snake_case__ , tokenizer=snake_case__ )
return generator, ["Something to write", "Something else"]
def UpperCamelCase ( self : Dict , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = generator('Something there' )
self.assertEqual(snake_case__ , [{'generated_text': ANY(snake_case__ )}] )
        # These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
SCREAMING_SNAKE_CASE = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case__ )
self.assertEqual(
snake_case__ , [
[{'generated_text': ANY(snake_case__ )}, {'generated_text': ANY(snake_case__ )}],
[{'generated_text': ANY(snake_case__ )}, {'generated_text': ANY(snake_case__ )}],
] , )
SCREAMING_SNAKE_CASE = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case__ )
self.assertEqual(
snake_case__ , [
[{'generated_text': ANY(snake_case__ )}, {'generated_text': ANY(snake_case__ )}],
[{'generated_text': ANY(snake_case__ )}, {'generated_text': ANY(snake_case__ )}],
] , )
with self.assertRaises(snake_case__ ):
generator(4 )
@require_torch
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE = generator('Something there' , do_sample=snake_case__ )
self.assertEqual(snake_case__ , [{'generated_text': ''}] )
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = generator(
'Something there' , num_return_sequences=snake_case__ , num_beams=snake_case__ , )
SCREAMING_SNAKE_CASE = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = generator('This is a test' , do_sample=snake_case__ , num_return_sequences=2 , return_tensors=snake_case__ )
self.assertEqual(
snake_case__ , [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] , )
SCREAMING_SNAKE_CASE = generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE = '<pad>'
SCREAMING_SNAKE_CASE = generator(
['This is a test', 'This is a second test'] , do_sample=snake_case__ , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case__ , )
self.assertEqual(
snake_case__ , [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] , )
@require_tf
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE = generator('Something there' , do_sample=snake_case__ )
self.assertEqual(snake_case__ , [{'generated_text': ''}] )
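        # Minimal usage sketch outside this test (the checkpoint name is
        # illustrative):
        # generator = pipeline("text2text-generation", model="t5-small")
        # generator("translate English to German: Hello")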
| 673 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 673 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
a_ : Optional[Any] = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def __lowerCAmelCase ( ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
SCREAMING_SNAKE_CASE = get_sagemaker_input()
else:
SCREAMING_SNAKE_CASE = get_cluster_input()
return config
def __lowerCAmelCase ( _UpperCamelCase : Any=None ) -> Dict:
'''simple docstring'''
if subparsers is not None:
SCREAMING_SNAKE_CASE = subparsers.add_parser('config' , description=_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = argparse.ArgumentParser('Accelerate config command' , description=_UpperCamelCase )
parser.add_argument(
'--config_file' , default=_UpperCamelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=_UpperCamelCase )
return parser
def __lowerCAmelCase ( _UpperCamelCase : str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_user_input()
if args.config_file is not None:
SCREAMING_SNAKE_CASE = args.config_file
else:
if not os.path.isdir(_UpperCamelCase ):
os.makedirs(_UpperCamelCase )
SCREAMING_SNAKE_CASE = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(_UpperCamelCase )
else:
config.to_yaml_file(_UpperCamelCase )
print(f"""accelerate configuration saved at {config_file}""" )
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = config_command_parser()
SCREAMING_SNAKE_CASE = parser.parse_args()
config_command(_UpperCamelCase )
if __name__ == "__main__":
main()
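# Typical flow implemented above (illustrative): running `accelerate config` walks
# through the interactive prompts and, unless --config_file is given, saves the
# answers to the default cache location as default_config.yaml.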
| 673 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =AudioLDMPipeline
__UpperCamelCase =TEXT_TO_AUDIO_PARAMS
__UpperCamelCase =TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=snake_case__ , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
SCREAMING_SNAKE_CASE = ClapTextModelWithProjection(snake_case__ )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 )
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case__ , )
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : int , snake_case__ : int=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
embeds.append(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'egg cracking'
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ , negative_prompt=snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.016 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.032 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.032
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = ['hey']
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
assert audio_shape == (1, 2_5_6)
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case__ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ )
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Tuple="cpu" , snake_case__ : List[str]=torch.floataa , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = np.random.RandomState(snake_case__ ).standard_normal((1, 8, 1_2_8, 1_6) )
SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 2_5
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[7_7_2_3_0:7_7_2_4_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[2_7_7_8_0:2_7_7_9_0]
SCREAMING_SNAKE_CASE = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
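# Hedged usage sketch of the pipeline exercised by the slow tests above
# (assumes the public `cvssp/audioldm` checkpoint is available; the kwargs
# mirror the test inputs):
if __name__ == "__main__":
    from diffusers import AudioLDMPipeline

    sketch_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
    sketch_audio = sketch_pipe(
        "A hammer hitting a wooden surface", num_inference_steps=10, guidance_scale=2.5
    ).audios[0]
    print(sketch_audio.shape)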
| 673 | 1 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase :
def __init__( self : Optional[int] , snake_case__ : int , snake_case__ : Union[str, Any]=1_3 , snake_case__ : Tuple=3_0 , snake_case__ : Dict=2 , snake_case__ : Tuple=3 , snake_case__ : Any=True , snake_case__ : Union[str, Any]=True , snake_case__ : str=3_2 , snake_case__ : List[str]=2 , snake_case__ : Any=4 , snake_case__ : Union[str, Any]=3_7 , snake_case__ : Optional[Any]="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : str=0.1 , snake_case__ : Tuple=1_0 , snake_case__ : Tuple=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=0.6 , snake_case__ : List[str]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = mask_ratio
SCREAMING_SNAKE_CASE = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
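        # e.g. with the defaults above: num_patches = (30 // 2) ** 2 = 225 and
        # seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91 visible tokens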
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCamelCase ( self : Any , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFViTMAEModel(config=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : str , snake_case__ : Any , snake_case__ : int , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFViTMAEForPreTraining(snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , training=snake_case__ )
# expected sequence length = num_patches
SCREAMING_SNAKE_CASE = (self.image_size // self.patch_size) ** 2
SCREAMING_SNAKE_CASE = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = TFViTMAEForPreTraining(snake_case__ )
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = model(snake_case__ , training=snake_case__ )
SCREAMING_SNAKE_CASE = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = config_and_inputs
SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__UpperCamelCase ={"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFViTMAEModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
pass
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Layer ) )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
SCREAMING_SNAKE_CASE = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case__ )
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
np.random.seed(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
SCREAMING_SNAKE_CASE = self._prepare_for_class(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , noise=snake_case__ )
SCREAMING_SNAKE_CASE = copy.deepcopy(self._prepare_for_class(snake_case__ , snake_case__ ) )
SCREAMING_SNAKE_CASE = model(**snake_case__ , noise=snake_case__ )
SCREAMING_SNAKE_CASE = outputs_dict[0].numpy()
SCREAMING_SNAKE_CASE = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
np.random.seed(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(snake_case__ : Tuple ):
SCREAMING_SNAKE_CASE = {}
for k, v in inputs_dict.items():
if tf.is_tensor(snake_case__ ):
SCREAMING_SNAKE_CASE = v.numpy()
else:
SCREAMING_SNAKE_CASE = np.array(snake_case__ )
return inputs_np_dict
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
SCREAMING_SNAKE_CASE = self._prepare_for_class(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = prepare_numpy_arrays(snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , noise=snake_case__ )
SCREAMING_SNAKE_CASE = model(**snake_case__ , noise=snake_case__ )
self.assert_outputs_same(snake_case__ , snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
np.random.seed(2 )
SCREAMING_SNAKE_CASE = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
SCREAMING_SNAKE_CASE = tf.constant(snake_case__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
SCREAMING_SNAKE_CASE = tf_noise
super().check_pt_tf_models(snake_case__ , snake_case__ , snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
np.random.seed(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(snake_case__ )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(snake_case__ , snake_case__ ),)
if isinstance(snake_case__ , snake_case__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(snake_case__ , '_keras_serializable' , snake_case__ )
}
SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(snake_case__ )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
SCREAMING_SNAKE_CASE = main_layer_class(snake_case__ )
SCREAMING_SNAKE_CASE = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
SCREAMING_SNAKE_CASE = tf.keras.Model(snake_case__ , outputs=main_layer(snake_case__ ) )
SCREAMING_SNAKE_CASE = model(snake_case__ )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = os.path.join(snake_case__ , 'keras_model.h5' )
model.save(snake_case__ )
SCREAMING_SNAKE_CASE = tf.keras.models.load_model(
snake_case__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(snake_case__ , tf.keras.Model )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.assert_outputs_same(snake_case__ , snake_case__ )
@slow
def UpperCamelCase ( self : Any ):
"""simple docstring"""
np.random.seed(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
SCREAMING_SNAKE_CASE = self._prepare_for_class(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , noise=snake_case__ )
if model_class.__name__ == "TFViTMAEModel":
SCREAMING_SNAKE_CASE = outputs.last_hidden_state.numpy()
SCREAMING_SNAKE_CASE = 0
else:
SCREAMING_SNAKE_CASE = outputs.logits.numpy()
SCREAMING_SNAKE_CASE = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case__ , saved_model=snake_case__ )
SCREAMING_SNAKE_CASE = model_class.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , noise=snake_case__ )
if model_class.__name__ == "TFViTMAEModel":
SCREAMING_SNAKE_CASE = after_outputs['last_hidden_state'].numpy()
SCREAMING_SNAKE_CASE = 0
else:
SCREAMING_SNAKE_CASE = after_outputs['logits'].numpy()
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case__ , 1E-5 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
np.random.seed(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = int((config.image_size // config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
SCREAMING_SNAKE_CASE = self._prepare_for_class(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , noise=snake_case__ )
SCREAMING_SNAKE_CASE = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(snake_case__ )
SCREAMING_SNAKE_CASE = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
SCREAMING_SNAKE_CASE = model_class.from_config(model.config )
SCREAMING_SNAKE_CASE = new_model(snake_case__ ) # Build model
new_model.set_weights(model.get_weights() )
SCREAMING_SNAKE_CASE = new_model(snake_case__ , noise=snake_case__ )
self.assert_outputs_same(snake_case__ , snake_case__ )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
pass
@slow
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(snake_case__ )
def __lowerCAmelCase ( ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self : str ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
np.random.seed(2 )
SCREAMING_SNAKE_CASE = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=snake_case__ , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
SCREAMING_SNAKE_CASE = ViTMAEConfig()
SCREAMING_SNAKE_CASE = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE = np.random.uniform(size=(1, num_patches) )
# forward pass
SCREAMING_SNAKE_CASE = model(**snake_case__ , noise=snake_case__ )
# verify the logits
SCREAMING_SNAKE_CASE = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape , snake_case__ )
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , snake_case__ , atol=1E-4 )
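# Sketch of the noise-driven random masking that the tests above make
# deterministic by passing a fixed `noise` tensor (illustrative; mirrors the
# MAE recipe of keeping the patches with the smallest noise values):
import numpy as np

def sketch_random_masking(noise: np.ndarray, mask_ratio: float):
    num_patches = noise.shape[-1]
    len_keep = int(num_patches * (1 - mask_ratio))
    ids_shuffle = np.argsort(noise, axis=-1)  # ascending: small noise = keep
    ids_restore = np.argsort(ids_shuffle, axis=-1)
    mask = np.ones_like(noise)
    mask[..., :len_keep] = 0  # 0 = keep, 1 = masked (in shuffled order)
    mask = np.take_along_axis(mask, ids_restore, axis=-1)  # back to patch order
    return mask, ids_restore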
| 673 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase :
def __init__( self : Dict , snake_case__ : str , snake_case__ : str=1_3 , snake_case__ : Tuple=7 , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : List[str]=False , snake_case__ : Any=True , snake_case__ : Union[str, Any]=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Optional[Any]=5 , snake_case__ : Optional[Any]=4 , snake_case__ : Union[str, Any]=3_7 , snake_case__ : Tuple="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : int=4 , snake_case__ : List[str]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
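        # i.e. feeding only the last 3 tokens together with the cached past_key_values
        # must reproduce the hidden states obtained by re-running the full concatenated sequence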
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCamelCase =(OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase =(
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 1_0] , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
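# Minimal sketch of the two RoPE scaling variants probed above (illustrative;
# "linear" divides positions by the factor up front, while "dynamic" only
# stretches the frequency base once the input exceeds the trained context length):
import torch

def sketch_rope_angles(seq_len: int, dim: int, base: float = 10000.0, linear_factor: float = 1.0) -> torch.Tensor:
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
    positions = torch.arange(seq_len, dtype=torch.float32) / linear_factor  # linear scaling
    return torch.outer(positions, inv_freq)  # (seq_len, dim // 2) rotation angles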
| 673 | 1 |
def solution() -> int:
    '''simple docstring'''
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 19_01
    sundays = 0
    while year < 20_01:
        day += 7
        if (year % 4 == 0 and year % 1_00 != 0) or (year % 4_00 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 20_01 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
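# Cross-check of the hand-rolled calendar walk above via the standard library
# (illustrative; `date.weekday() == 6` means Sunday):
if __name__ == "__main__":
    import datetime

    assert solution() == sum(
        1
        for y in range(1901, 2001)
        for m in range(1, 13)
        if datetime.date(y, m, 1).weekday() == 6
    )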
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="openai/whisper-base"
__UpperCamelCase =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCamelCase ="transcriber"
__UpperCamelCase =WhisperProcessor
__UpperCamelCase =WhisperForConditionalGeneration
__UpperCamelCase =["audio"]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Dict , snake_case__ : Tuple ):
"""simple docstring"""
return self.pre_processor(snake_case__ , return_tensors='pt' ).input_features
def UpperCamelCase ( self : Optional[int] , snake_case__ : Tuple ):
"""simple docstring"""
return self.model.generate(inputs=snake_case__ )
def UpperCamelCase ( self : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
return self.pre_processor.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )[0]
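# Direct equivalent of the tool above, using the same checkpoint (illustrative;
# assumes `audio` is a 16 kHz mono waveform as a 1-D numpy array):
if __name__ == "__main__":
    import numpy as np
    from transformers import WhisperForConditionalGeneration, WhisperProcessor

    audio = np.zeros(16_000, dtype=np.float32)  # one second of silence as a stand-in
    processor = WhisperProcessor.from_pretrained("openai/whisper-base")
    model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")
    features = processor(audio, sampling_rate=16_000, return_tensors="pt").input_features
    tokens = model.generate(inputs=features)
    print(processor.batch_decode(tokens, skip_special_tokens=True)[0])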
| 673 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        '''simple docstring'''
        raise NotImplementedError()
    @abstractmethod
    def run(self):
        '''simple docstring'''
        raise NotImplementedError()
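# Minimal sketch of a concrete command honouring the ABC above (hypothetical
# "env" command name, shown only to illustrate the register/run contract;
# in practice `parser` is the sub-parsers action returned by add_subparsers()):
class SketchEnvCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        sub = parser.add_parser("env")
        sub.set_defaults(func=lambda args: SketchEnvCommand().run())
    def run(self):
        print("environment info goes here")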
| 673 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
_CITATION = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def UpperCamelCase ( self : Dict , snake_case__ : int ):
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[Any]=0.9 , snake_case__ : Optional[Any]=3 , snake_case__ : Any=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(
word_tokenize(snake_case__ ) , word_tokenize(snake_case__ ) , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
else:
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(snake_case__ , snake_case__ , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
return {"meteor": np.mean(snake_case__ )}
| 673 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
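# Minimal sketch of the lazy-import idea behind `_LazyModule` above
# (illustrative, not the real implementation): attribute access resolves and
# caches the symbol from its submodule on first use.
import importlib
import types

class SketchLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr: str):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_submodule[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value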
| 673 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
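# Example values (illustrative): SiLU is x * sigmoid(x), so
# sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0])) -> approx [-0.26894142, 0.0, 0.73105858]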
| 673 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
@classmethod
def UpperCamelCase ( cls : Any , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ):
"""simple docstring"""
return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ )
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
@property
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return True
@register_to_config
def __init__( self : List[Any] , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0_001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = dtype
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[CommonSchedulerState] = None ):
"""simple docstring"""
if common is None:
SCREAMING_SNAKE_CASE = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE = jnp.array(1.0 , dtype=self.dtype )
SCREAMING_SNAKE_CASE = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , )
def UpperCamelCase ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ):
"""simple docstring"""
return sample
def UpperCamelCase ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=snake_case__ , timesteps=snake_case__ , )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        """Compute the per-step variance for timestep ``t`` following the configured variance_type."""
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
def UpperCamelCase ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = timestep
if key is None:
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
SCREAMING_SNAKE_CASE = None
# 1. compute alphas, betas
SCREAMING_SNAKE_CASE = state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
SCREAMING_SNAKE_CASE = 1 - alpha_prod_t
SCREAMING_SNAKE_CASE = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE = model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"""
                ' `v_prediction` for the FlaxDDPMScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
SCREAMING_SNAKE_CASE = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
SCREAMING_SNAKE_CASE = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
SCREAMING_SNAKE_CASE = jax.random.split(snake_case__ , num=1 )
SCREAMING_SNAKE_CASE = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
SCREAMING_SNAKE_CASE = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
SCREAMING_SNAKE_CASE = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
"""simple docstring"""
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def UpperCamelCase ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
"""simple docstring"""
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self : Union[str, Any] ):
"""simple docstring"""
return self.config.num_train_timesteps
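# --- Added sketch (not part of the original row) ---
# Formula (7) of the DDPM paper, computed standalone for a toy linear beta
# schedule; the schedule values and names below are illustrative only.
def _ddpm_posterior_variance_sketch(num_steps: int = 10) -> jnp.ndarray:
    betas = jnp.linspace(1e-4, 2e-2, num_steps)
    alphas_cumprod = jnp.cumprod(1.0 - betas)
    alphas_cumprod_prev = jnp.append(jnp.array([1.0]), alphas_cumprod[:-1])
    # beta_tilde_t = (1 - abar_{t-1}) / (1 - abar_t) * beta_t
    return (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod) * betas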
| 673 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
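# --- Added sketch (not part of the original row) ---
# Constructing the config with its defaults; runnable only inside the
# transformers package layout that this module's relative imports assume.
if __name__ == "__main__":
    config = VanConfig(hidden_sizes=[64, 128, 320, 512])
    print(config.model_type, config.depths)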
| 673 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
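# --- Added sketch (not part of the original row) ---
# A minimal standalone analogue of the lazy-import machinery above, using the
# PEP 562 module-level __getattr__ hook; the mapping below is illustrative.
def _lazy_attr_sketch(name: str):
    import importlib

    locations = {"AlbertConfig": ".configuration_albert"}
    if name in locations:
        module = importlib.import_module(locations[name], __package__)
        return getattr(module, name)
    raise AttributeError(name)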
| 673 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Return every contiguous character n-gram of ``sentence`` of length ``ngram_size``."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
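# --- Added sketch (not part of the original row) ---
# Example using the restored name above:
def _demo_create_ngram() -> list[str]:
    # character-level bigrams of a short string
    return create_ngram("abcde", 2)  # ['ab', 'bc', 'cd', 'de']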
if __name__ == "__main__":
from doctest import testmod
testmod()
| 673 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Any , **snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(snake_case__ )
def __call__( self : List[Any] , snake_case__ : Union[str, "Image.Image", List[Dict[str, Any]]] , snake_case__ : Union[str, List[str]] = None , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
if "text_queries" in kwargs:
SCREAMING_SNAKE_CASE = kwargs.pop('text_queries' )
if isinstance(snake_case__ , (str, Image.Image) ):
SCREAMING_SNAKE_CASE = {'image': image, 'candidate_labels': candidate_labels}
else:
SCREAMING_SNAKE_CASE = image
SCREAMING_SNAKE_CASE = super().__call__(snake_case__ , **snake_case__ )
return results
def UpperCamelCase ( self : Union[str, Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
if "threshold" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['threshold']
if "top_k" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['top_k']
return {}, {}, postprocess_params
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(inputs['image'] )
SCREAMING_SNAKE_CASE = inputs['candidate_labels']
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = candidate_labels.split(',' )
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
for i, candidate_label in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE = self.tokenizer(snake_case__ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE = self.image_processor(snake_case__ , return_tensors=self.framework )
yield {
"is_last": i == len(snake_case__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase ( self : Any , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = model_inputs.pop('target_size' )
SCREAMING_SNAKE_CASE = model_inputs.pop('candidate_label' )
SCREAMING_SNAKE_CASE = model_inputs.pop('is_last' )
SCREAMING_SNAKE_CASE = self.model(**snake_case__ )
SCREAMING_SNAKE_CASE = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        """simple docstring"""
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output['target_size'] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0] )
                result = {'score': score, 'label': label, 'box': box}
                results.append(result )
        results = sorted(results , key=lambda x: x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box: "torch.Tensor"):
        """simple docstring"""
        if self.framework != "pt":
            raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
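# --- Added sketch (not part of the original row) ---
# Hypothetical end-user call, assuming this file backs the
# "zero-shot-object-detection" pipeline task (downloads a checkpoint):
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    print(
        detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote control"],
        )
    )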
| 673 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Any , **snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(snake_case__ )
def __call__( self : List[Any] , snake_case__ : Union[str, "Image.Image", List[Dict[str, Any]]] , snake_case__ : Union[str, List[str]] = None , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
if "text_queries" in kwargs:
SCREAMING_SNAKE_CASE = kwargs.pop('text_queries' )
if isinstance(snake_case__ , (str, Image.Image) ):
SCREAMING_SNAKE_CASE = {'image': image, 'candidate_labels': candidate_labels}
else:
SCREAMING_SNAKE_CASE = image
SCREAMING_SNAKE_CASE = super().__call__(snake_case__ , **snake_case__ )
return results
def UpperCamelCase ( self : Union[str, Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
if "threshold" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['threshold']
if "top_k" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['top_k']
return {}, {}, postprocess_params
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(inputs['image'] )
SCREAMING_SNAKE_CASE = inputs['candidate_labels']
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = candidate_labels.split(',' )
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
for i, candidate_label in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE = self.tokenizer(snake_case__ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE = self.image_processor(snake_case__ , return_tensors=self.framework )
yield {
"is_last": i == len(snake_case__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase ( self : Any , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = model_inputs.pop('target_size' )
SCREAMING_SNAKE_CASE = model_inputs.pop('candidate_label' )
SCREAMING_SNAKE_CASE = model_inputs.pop('is_last' )
SCREAMING_SNAKE_CASE = self.model(**snake_case__ )
SCREAMING_SNAKE_CASE = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        """simple docstring"""
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output['target_size'] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0] )
                result = {'score': score, 'label': label, 'box': box}
                results.append(result )
        results = sorted(results , key=lambda x: x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box: "torch.Tensor"):
        """simple docstring"""
        if self.framework != "pt":
            raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
| 673 | 1 |
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """True iff the partition generated by ``positive_integer`` is perfect."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12_345) -> int:
    """Return the first partition value whose perfect-partition proportion drops below ``max_proportion``."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
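# --- Added sketch (not part of the original row) ---
# Quick check of the perfect-partition test: 12 = 2**2 * (2**2 - 1) satisfies
# the closed form above, while 13 does not.
def _demo_check_partition_perfect() -> None:
    assert check_partition_perfect(12)
    assert not check_partition_perfect(13)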
if __name__ == "__main__":
print(F"""{solution() = }""")
| 673 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
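# --- Added sketch (not part of the original row) ---
# Known small case: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
def _demo_solution() -> None:
    assert solution(15) == 26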
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
print("Sum of the digits is: ", result)
| 673 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [F"""This example is {label}""" for label in labels], return_tensors="pt", padding="max_length"
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
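# --- Added sketch (not part of the original row) ---
# The NLI trick the tool relies on, shown standalone: score
# "This example is <label>" as the hypothesis and read the entailment logit
# (index 2 for bart-large-mnli). Runnable given the checkpoint download and a
# package context for the relative imports above.
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-mnli")
    model = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli")
    labels = ["positive", "negative"]
    inputs = tokenizer(
        ["I loved this movie"] * len(labels),
        [f"This example is {label}" for label in labels],
        return_tensors="pt",
        padding=True,
    )
    with torch.no_grad():
        logits = model(**inputs).logits
    print(labels[int(torch.argmax(logits[:, 2]))])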
| 673 | 1 |
import math
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
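# --- Added sketch (not part of the original row) ---
# Known small case: for the first ten natural numbers the difference is
# 3025 - 385 = 2640.
def _demo_solution() -> None:
    assert solution(10) == 2640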
if __name__ == "__main__":
print(F"""{solution() = }""")
| 673 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"""{k}</w>"""]
        d2[k] = d[k]  # restore
    return d2
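# --- Added sketch (not part of the original row) ---
# What the rewrite does on a toy fastBPE vocabulary (illustrative values):
def _demo_rewrite_dict_keys() -> dict:
    toy = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 4, "er": 5}
    # -> {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 4, "er</w>": 5}
    return rewrite_dict_keys(toy)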
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
assert os.path.exists(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE = cls.hub_models()
SCREAMING_SNAKE_CASE = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
SCREAMING_SNAKE_CASE = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
SCREAMING_SNAKE_CASE = hub_utils.from_pretrained(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , archive_map=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vars(chkpt['args']['model'] )
SCREAMING_SNAKE_CASE = args['source_lang']
SCREAMING_SNAKE_CASE = args['target_lang']
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
# dicts
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{src_lang}.txt""" )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{tgt_lang}.txt""" )
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-src.json' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE = False
break
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-tgt.json' )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.exists(_UpperCamelCase ):
break
with open(_UpperCamelCase , encoding='utf-8' ) as fin:
SCREAMING_SNAKE_CASE = fin.read()
SCREAMING_SNAKE_CASE = re.sub(R' \d+$' , '' , _UpperCamelCase , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_UpperCamelCase )
# model config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
SCREAMING_SNAKE_CASE = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE = 5
SCREAMING_SNAKE_CASE = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE = best_score_hparams[model_dir]['length_penalty']
else:
SCREAMING_SNAKE_CASE = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# model
SCREAMING_SNAKE_CASE = chkpt['models'][0]
SCREAMING_SNAKE_CASE = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration(_UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
# save
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_UpperCamelCase , _UpperCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 673 | 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =VideoToVideoSDPipeline
__UpperCamelCase =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
__UpperCamelCase =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
__UpperCamelCase =PipelineTesterMixin.required_optional_params - {"latents"}
__UpperCamelCase =False
# No `output_type`.
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(snake_case__ )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[str] , snake_case__ : Any=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = VideoToVideoSDPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'np'
SCREAMING_SNAKE_CASE = sd_pipe(**snake_case__ ).frames
SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
SCREAMING_SNAKE_CASE = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ , expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
pass
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
        pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.float16 )
pipe.enable_model_cpu_offload()
# 10 frames
SCREAMING_SNAKE_CASE = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=snake_case__ )
SCREAMING_SNAKE_CASE = video.to('cuda' )
SCREAMING_SNAKE_CASE = 'Spiderman is surfing'
SCREAMING_SNAKE_CASE = pipe(snake_case__ , video=snake_case__ , generator=snake_case__ , num_inference_steps=3 , output_type='pt' ).frames
SCREAMING_SNAKE_CASE = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 673 |
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph as an adjacency list over ``vertices_number`` nodes."""
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add the edge from j to i
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Return a complete graph with ``vertices_number`` vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
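# --- Added sketch (not part of the original row) ---
# Seeding the module-level RNG makes the Bernoulli edge draws reproducible:
def _demo_random_graph() -> dict:
    random.seed(1)
    return random_graph(4, 0.5)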
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 1 |
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Return a nontrivial factor of ``num`` using Pollard's rho, or None on failure."""
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        """Return ``f(value) = (value**2 + step) % modulus``."""
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's the "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
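# --- Added sketch (not part of the original row) ---
# Textbook example: 8051 = 83 * 97; either factor may come back depending on
# the seed and step values.
def _demo_pollard_rho() -> None:
    assert pollard_rho(8051) in (83, 97)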
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""")
| 673 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any]=1_3 , snake_case__ : Union[str, Any]=7 , snake_case__ : List[str]=True , snake_case__ : Any=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=9_9 , snake_case__ : str=3_2 , snake_case__ : Dict=5 , snake_case__ : str=4 , snake_case__ : int=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : List[Any]=1_6 , snake_case__ : str=2 , snake_case__ : int=0.02 , snake_case__ : List[str]=3 , snake_case__ : Dict=4 , snake_case__ : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'the [MASK] of Belgium is Brussels'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) , 'capital' )
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) - 1
while left <= right:
        # avoid division by zero during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
SCREAMING_SNAKE_CASE = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(_UpperCamelCase ):
return None
SCREAMING_SNAKE_CASE = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
SCREAMING_SNAKE_CASE = left
SCREAMING_SNAKE_CASE = point
elif point > right:
SCREAMING_SNAKE_CASE = right
SCREAMING_SNAKE_CASE = point
else:
if item < current_item:
SCREAMING_SNAKE_CASE = point - 1
else:
SCREAMING_SNAKE_CASE = point + 1
return None
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
SCREAMING_SNAKE_CASE = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(_UpperCamelCase ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
elif point > right:
return interpolation_search_by_recursion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , point - 1 )
else:
return interpolation_search_by_recursion(
_UpperCamelCase , _UpperCamelCase , point + 1 , _UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Dict ) -> int:
'''simple docstring'''
if collection != sorted(_UpperCamelCase ):
        raise ValueError('Collection must be sorted in ascending order' )
return True
if __name__ == "__main__":
import sys
a_ : Optional[Any] = 0
if debug == 1:
a_ : Dict = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
        sys.exit("Sequence must be sorted in ascending order to apply interpolation search")
a_ : Optional[Any] = 67
a_ : Dict = interpolation_search(collection, target)
if result is not None:
print(F"""{target} found at positions: {result}""")
else:
print("Not found")
| 673 |
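For readability, here is a de-obfuscated sketch of the iterative interpolation search above. The probe formula and the guards match the snippet; the variable names and the simplified out-of-range handling are my own reconstruction and assume an ascending input.

def interpolation_search(sorted_collection, item):
    # Reconstruction of the iterative search above with readable names.
    left, right = 0, len(sorted_collection) - 1
    while left <= right:
        # Guard against division by zero when the remaining values are all equal.
        if sorted_collection[left] == sorted_collection[right]:
            return left if sorted_collection[left] == item else None
        # Linear-interpolation probe: estimate where `item` would sit.
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        if point < 0 or point >= len(sorted_collection):
            return None  # item lies outside the value range
        current = sorted_collection[point]
        if current == item:
            return point
        if item < current:
            right = point - 1
        else:
            left = point + 1
    return None

assert interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 66) == 5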
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 673 | 1 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
a_ : Dict = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any]=True ) -> Any:
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=SCREAMING_SNAKE_CASE ) )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =None
__UpperCamelCase =None
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : str ):
"""simple docstring"""
with TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = dataset_module_factory(snake_case__ , cache_dir=snake_case__ )
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=snake_case__ )
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=snake_case__ , config_name=snake_case__ , hash=dataset_module.hash , )
SCREAMING_SNAKE_CASE = '/'.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=snake_case__ ).replace(os.sep , '/' ),
config.DATASET_INFO_FILENAME,
] )
SCREAMING_SNAKE_CASE = cached_path(snake_case__ , cache_dir=snake_case__ )
self.assertTrue(os.path.exists(snake_case__ ) )
@pytest.mark.integration
def __lowerCAmelCase ( _UpperCamelCase : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple'
SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCamelCase )
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path )
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=_UpperCamelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
SCREAMING_SNAKE_CASE = None
builder_instance.download_and_prepare()
SCREAMING_SNAKE_CASE = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dataset_module_factory('wikipedia' , cache_dir=_UpperCamelCase )
SCREAMING_SNAKE_CASE = import_main_class(dataset_module.module_path , dataset=_UpperCamelCase )
SCREAMING_SNAKE_CASE = builder_cls(
cache_dir=_UpperCamelCase , config_name='20220301.frr' , hash=dataset_module.hash , )
SCREAMING_SNAKE_CASE = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_UpperCamelCase , _UpperCamelCase )
assert "train" in ds
assert isinstance(ds['train'] , _UpperCamelCase )
assert next(iter(ds['train'] ) )
| 673 |
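The test above builds the remote dataset-info URL by joining three pieces with '/'. A minimal sketch of that assembly; the base URL and the relative data dir shown here are illustrative stand-ins, not values read from the library.

import os

HF_GCP_BASE_URL = "https://storage.googleapis.com/huggingface-nlp/cache/datasets"  # assumed value
DATASET_INFO_FILENAME = "dataset_info.json"
# Hypothetical output of builder_instance._relative_data_dir(with_hash=False):
relative_data_dir = os.path.join("wikipedia", "20220301.frr", "2.0.0")

url = "/".join([HF_GCP_BASE_URL, relative_data_dir.replace(os.sep, "/"), DATASET_INFO_FILENAME])
print(url)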
import heapq
import sys
import numpy as np
a_ : Optional[int] = tuple[int, int]
class UpperCamelCase :
def __init__( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.elements ) == 0
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(snake_case__ )
else:
# update
# print("update", item)
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
if item in self.set:
self.set.remove(snake_case__ )
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
return self.elements[0][1]
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
self.set.remove(snake_case__ )
return (priority, item)
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
return np.linalg.norm(a - b )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Dict:
'''simple docstring'''
return consistent_heuristic(_UpperCamelCase , _UpperCamelCase ) // t
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[int]:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : int , _UpperCamelCase : TPos , _UpperCamelCase : dict[TPos, float] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = g_function[start] + Wa * heuristics[i](_UpperCamelCase , _UpperCamelCase )
return ans
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.chararray((n, n) )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = '*'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (j, (n - 1) - i) in blocks:
SCREAMING_SNAKE_CASE = '#'
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = x
# print(x)
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[x]
SCREAMING_SNAKE_CASE = '-'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
print(_UpperCamelCase , end=' ' )
SCREAMING_SNAKE_CASE = back_pointer[x]
print(_UpperCamelCase )
sys.exit()
def __lowerCAmelCase ( _UpperCamelCase : TPos ) -> Any:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , ) -> List[Any]:
'''simple docstring'''
for itera in range(_UpperCamelCase ):
open_list[itera].remove_element(_UpperCamelCase )
# print("s", s)
# print("j", j)
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = s
SCREAMING_SNAKE_CASE = (x - 1, y)
SCREAMING_SNAKE_CASE = (x + 1, y)
SCREAMING_SNAKE_CASE = (x, y + 1)
SCREAMING_SNAKE_CASE = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = float('inf' )
if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
SCREAMING_SNAKE_CASE = g_function[s] + 1
SCREAMING_SNAKE_CASE = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCamelCase ):
if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ):
open_list[j].put(
_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a_ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ : List[str] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a_ : Union[str, Any] = make_common_ground()
a_ : Tuple = blocks_blk
# hyper parameters
a_ : Any = 1
a_ : List[str] = 1
a_ : Union[str, Any] = 20
a_ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a_ : int = (0, 0)
a_ : Optional[int] = (n - 1, n - 1)
a_ : Union[str, Any] = 1
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos , _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {start: 0, goal: float('inf' )}
SCREAMING_SNAKE_CASE = {start: -1, goal: -1}
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
for i in range(_UpperCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , _UpperCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = open_list[i].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_inad.append(_UpperCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = open_list[0].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_anchor.append(_UpperCamelCase )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 673 | 1 |
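Priorities in the multi-heuristic A* above are key(s, i) = g[s] + Wa * h_i(s, goal), with the anchor queue (i = 0) using the consistent Euclidean heuristic. A small sketch of one key evaluation:

import numpy as np

def consistent_heuristic(p, goal):
    # Euclidean distance, as in the snippet above.
    return float(np.linalg.norm(np.array(p) - np.array(goal)))

Wa = 1.0                    # anchor weight, mirroring Wa above
g_function = {(0, 0): 0.0}  # cost-so-far for the start node
goal = (19, 19)

# key(s, i) = g[s] + Wa * h_i(s, goal) -- the priority pushed into open_list[i]
print(g_function[(0, 0)] + Wa * consistent_heuristic((0, 0), goal))  # ~26.87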
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase :
def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : str=1_3 , snake_case__ : Tuple=3_0 , snake_case__ : Dict=2 , snake_case__ : Optional[Any]=3 , snake_case__ : Tuple=True , snake_case__ : Any=True , snake_case__ : List[Any]=3_2 , snake_case__ : List[Any]=2 , snake_case__ : str=4 , snake_case__ : Optional[int]=3_7 , snake_case__ : Optional[Any]="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : Any=0.1 , snake_case__ : List[str]=1_0 , snake_case__ : List[str]=0.02 , snake_case__ : int=3 , snake_case__ : Union[str, Any]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE = num_patches + 1
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFViTModel(config=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image of a different size than the one specified in the config.
SCREAMING_SNAKE_CASE = self.image_size // 2
SCREAMING_SNAKE_CASE = pixel_values[:, :, :image_size, :image_size]
SCREAMING_SNAKE_CASE = model(snake_case__ , interpolate_pos_encoding=snake_case__ , training=snake_case__ )
SCREAMING_SNAKE_CASE = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.type_sequence_label_size
SCREAMING_SNAKE_CASE = TFViTForImageClassification(snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , labels=snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image of a different size than the one specified in the config.
SCREAMING_SNAKE_CASE = self.image_size // 2
SCREAMING_SNAKE_CASE = pixel_values[:, :, :image_size, :image_size]
SCREAMING_SNAKE_CASE = model(snake_case__ , interpolate_pos_encoding=snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = TFViTForImageClassification(snake_case__ )
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
__UpperCamelCase =(
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFViTModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Layer ) )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
SCREAMING_SNAKE_CASE = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case__ )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(snake_case__ )
def __lowerCAmelCase ( ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=snake_case__ , return_tensors='tf' )
# forward pass
SCREAMING_SNAKE_CASE = model(**snake_case__ )
# verify the logits
SCREAMING_SNAKE_CASE = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case__ )
SCREAMING_SNAKE_CASE = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
tf.debugging.assert_near(outputs.logits[0, :3] , snake_case__ , atol=1E-4 )
| 673 |
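The sequence lengths asserted in the ViT tests above follow from simple patch arithmetic: one token per non-overlapping patch plus one [CLS] token. A quick check with the tester's defaults (image_size=30, patch_size=2):

# Patch-count arithmetic behind the assertions above.
image_size, patch_size = 30, 2
print((image_size // patch_size) ** 2 + 1)  # 226 = 15*15 patches + [CLS]

# The interpolate_pos_encoding test halves the spatial size, shrinking the sequence:
half = image_size // 2
print((half // patch_size) ** 2 + 1)  # 50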
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : str = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Any=8 ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=5_12 , _UpperCamelCase : Union[str, Any]=5_12 ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
SCREAMING_SNAKE_CASE = np.array(pil_image.convert('RGB' ) )
SCREAMING_SNAKE_CASE = arr.astype(np.floataa ) / 1_27.5 - 1
SCREAMING_SNAKE_CASE = np.transpose(_UpperCamelCase , [2, 0, 1] )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : int , snake_case__ : UNetaDConditionModel , snake_case__ : DDPMScheduler , snake_case__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=snake_case__ , scheduler=snake_case__ , movq=snake_case__ , )
SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase ( self : Any , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = min(int(num_inference_steps * strength ) , snake_case__ )
SCREAMING_SNAKE_CASE = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase ( self : List[str] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str=None ):
"""simple docstring"""
if not isinstance(snake_case__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case__ )}""" )
SCREAMING_SNAKE_CASE = image.to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE = image
else:
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case__ )
]
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
else:
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ ).latent_dist.sample(snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE = init_latents.shape
SCREAMING_SNAKE_CASE = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ )
# get latents
SCREAMING_SNAKE_CASE = self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = init_latents
return latents
def UpperCamelCase ( self : int , snake_case__ : List[str]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
SCREAMING_SNAKE_CASE = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=snake_case__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cpu_offload_with_hook(snake_case__ , snake_case__ , prev_module_hook=snake_case__ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case__ )
def __call__( self : str , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : int = 5_1_2 , snake_case__ : int = 5_1_2 , snake_case__ : int = 1_0_0 , snake_case__ : float = 4.0 , snake_case__ : float = 0.3 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._execution_device
SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = image_embeds.shape[0]
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [image]
if not all(isinstance(snake_case__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(snake_case__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
SCREAMING_SNAKE_CASE = torch.cat([prepare_image(snake_case__ , snake_case__ , snake_case__ ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE = image.to(dtype=image_embeds.dtype , device=snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ )['latents']
SCREAMING_SNAKE_CASE = latents.repeat_interleave(snake_case__ , dim=0 )
self.scheduler.set_timesteps(snake_case__ , device=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_timesteps(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor )
SCREAMING_SNAKE_CASE = self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , image_embeds.dtype , snake_case__ , snake_case__ )
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE = {'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE = self.unet(
sample=snake_case__ , timestep=snake_case__ , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE = self.scheduler.step(
snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ , )[0]
# post-processing
SCREAMING_SNAKE_CASE = self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
| 673 | 1 |
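The downscale_height_and_width helper at the top of the pipeline above ceil-divides each requested pixel dimension by scale_factor**2 and scales back by scale_factor, yielding the latent sizes passed to prepare_latents. A self-contained check:

def downscale_height_and_width(height, width, scale_factor=8):
    # Same logic as the helper above: ceil-divide by scale_factor**2,
    # then multiply back by scale_factor.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor

print(downscale_height_and_width(512, 512))  # (64, 64)
print(downscale_height_and_width(768, 500))  # (96, 64)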
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
a_ : Optional[Any] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : str = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
a_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 673 |
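The __init__ module above declares an _import_structure up front and lets _LazyModule resolve symbols only on first access, so importing the package stays cheap. A minimal sketch of the deferred-import idea it builds on, using the stdlib json module as a stand-in:

import importlib

def lazy_get(module_name, attr):
    # Resolve the symbol only when it is first requested; _LazyModule wires
    # the same idea into a module-level __getattr__.
    return getattr(importlib.import_module(module_name), attr)

dumps = lazy_get("json", "dumps")
print(dumps({"lazy": True}))  # {"lazy": true}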
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger("transformers.models.speecht5")
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None , ) -> Tuple:
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig.from_pretrained(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = np.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
model.save_pretrained(_UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
a_ : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 673 | 1 |
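The converter above calls apply_weight_norm() before copying tensors so the model exposes the weight_g/weight_v parameter names found in the checkpoint, then folds them back with remove_weight_norm(). A toy round-trip showing that renaming (plain torch, no SpeechT5 weights needed):

import torch
from torch.nn.utils import remove_weight_norm, weight_norm

conv = weight_norm(torch.nn.Conv1d(4, 4, 3))
print(sorted(n for n, _ in conv.named_parameters()))
# ['bias', 'weight_g', 'weight_v'] -- the names the checkpoint keys map onto

remove_weight_norm(conv)  # folds g and v back into a single 'weight'
print(sorted(n for n, _ in conv.named_parameters()))
# ['bias', 'weight']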
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Tuple = logging.get_logger(__name__)
a_ : Tuple = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="roberta"
def __init__( self : Any , snake_case__ : List[Any]=5_0_2_6_5 , snake_case__ : Tuple=7_6_8 , snake_case__ : Any=1_2 , snake_case__ : List[str]=1_2 , snake_case__ : Dict=3_0_7_2 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Dict=5_1_2 , snake_case__ : int=2 , snake_case__ : List[Any]=0.02 , snake_case__ : List[Any]=1E-12 , snake_case__ : Union[str, Any]=1 , snake_case__ : int=0 , snake_case__ : str=2 , snake_case__ : List[Any]="absolute" , snake_case__ : Tuple=True , snake_case__ : List[Any]=None , **snake_case__ : str , ):
"""simple docstring"""
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = classifier_dropout
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
@property
def UpperCamelCase ( self : Any ):
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 673 |
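A quick usage sketch of the config class above, assuming it is the stock RobertaConfig shipped with transformers; the printed values are the defaults visible in the __init__ signature.

from transformers import RobertaConfig

config = RobertaConfig()  # defaults mirror the signature above
print(config.vocab_size, config.hidden_size)             # 50265 768
print(config.position_embedding_type, config.use_cache)  # absolute True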
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a_ : List[Any] = {
"allenai/led-base-16384": 1_6384,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =LEDTokenizer
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Dict="replace" , snake_case__ : Tuple="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : List[Any]=False , snake_case__ : int=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs['global_attention_mask'] ) != len(snake_case__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(snake_case__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
                raise ValueError('Invalid padding strategy: ' + str(self.padding_side ) )
return encoded_inputs
| 673 | 1 |
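The _pad override above grows global_attention_mask with -1 rather than 0, because 0 already means "local attention" and must stay distinguishable from padding. A self-contained sketch of that rule:

def pad_global_attention_mask(mask, target_len, padding_side="right"):
    # Mirrors the rule in _pad above: padded positions get -1, never 0.
    difference = target_len - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy: " + str(padding_side))

print(pad_global_attention_mask([1, 0, 0], 6))  # [1, 0, 0, -1, -1, -1]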
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
if n == 1 or not isinstance(_UpperCamelCase , _UpperCamelCase ):
return 0
elif n == 2:
return 1
else:
SCREAMING_SNAKE_CASE = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 2
while digits < n:
index += 1
SCREAMING_SNAKE_CASE = len(str(fibonacci(_UpperCamelCase ) ) )
return index
def __lowerCAmelCase ( _UpperCamelCase : int = 10_00 ) -> int:
'''simple docstring'''
return fibonacci_digits_index(_UpperCamelCase )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 673 |
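The digit scan above recomputes the whole Fibonacci list on every iteration, which is quadratic overall. A linear sketch that keeps only the last two terms agrees with it for n >= 2 (F(12) = 144 is the first three-digit term; 4782 is the Project Euler 25 answer):

def fibonacci_digits_index_linear(n):
    # `b` is F(index) throughout the loop; no list is rebuilt.
    a, b, index = 1, 1, 2
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

print(fibonacci_digits_index_linear(3))     # 12
print(fibonacci_digits_index_linear(1000))  # 4782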
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have network issues; you can try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(_UpperCamelCase , 'r' ) as fh:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX )
try:
print(*_UpperCamelCase )
finally:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN )
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a_ : Dict = dist.get_rank()
a_ : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 673 | 1 |
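A single-process sanity check of the all_reduce used above can run without GPUs by switching to the gloo backend; the address and port below are illustrative.

import os
import torch
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)

t = torch.ones(1)
dist.all_reduce(t, op=dist.ReduceOp.SUM)  # sums the tensor across ranks
print(t.item())  # equals world_size, so 1.0 here

dist.destroy_process_group()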
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class UpperCamelCase :
def __init__( self : Dict , snake_case__ : Tuple=2 , snake_case__ : int=3 , snake_case__ : Any=6_4 , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = np.random.default_rng(snake_case__ )
SCREAMING_SNAKE_CASE = length
SCREAMING_SNAKE_CASE = rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : str ):
"""simple docstring"""
return self.length
def __getitem__( self : Dict , snake_case__ : int ):
"""simple docstring"""
return {"x": self.x[i], "y": self.y[i]}
class UpperCamelCase ( torch.nn.Module ):
def __init__( self : Union[str, Any] , snake_case__ : List[Any]=0 , snake_case__ : Any=0 , snake_case__ : Union[str, Any]=False ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE = True
def UpperCamelCase ( self : List[Any] , snake_case__ : Tuple=None ):
"""simple docstring"""
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
SCREAMING_SNAKE_CASE = False
return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv', data_files=data_files)
    label_list = datasets['train'].unique('label')
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'], examples['sentence2'], truncation=True, max_length=None, padding='max_length')
        if "label" in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['sentence1', 'sentence2', 'label'])

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders (batch sizes are pinned small for the tests;
    # the batch_size argument is kept for API parity).
    train_dataloader = DataLoader(tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
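

# Hedged usage sketch (illustrative, commented out): wiring these helpers to an
# `Accelerator`. `Accelerator` is not imported above, and the MRPC CSV files are
# assumed to exist at the paths given in `get_dataloaders`.
# from accelerate import Accelerator
# accelerator = Accelerator()
# train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)
# batch = next(iter(train_dataloader))  # dict of input_ids / attention_mask / labels tensors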
| 673 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
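

# Hedged illustration of the special-token layout built above (needs Hub access
# to instantiate; `cls_id`/`sep_id` stand in for the real token ids):
# tok = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
# tok.build_inputs_with_special_tokens([10, 11], [12, 13])
#   -> [cls_id, 10, 11, sep_id, 12, 13, sep_id]
# tok.create_token_type_ids_from_sequences([10, 11], [12, 13])
#   -> [0, 0, 0, 0, 1, 1, 1]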
| 673 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
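

# Hedged usage note: with the `_LazyModule` registered above, importing a name
# from this package defers loading the heavy modeling file until first access:
# from transformers.models.gpt_bigcode import GPTBigCodeConfig  # cheap, config only
# from transformers.models.gpt_bigcode import GPTBigCodeModel   # triggers the torch-backed import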
| 673 |
def sum_of_digits(n: int) -> int:
    '''Iterative digit sum of |n|.'''
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    '''Recursive digit sum: peel off the last digit and recurse on the rest.'''
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact(n: int) -> int:
    '''One-liner digit sum via the decimal string representation.'''
    return sum(int(c) for c in str(abs(n)))
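

# Quick sanity check (editorial addition): the three implementations agree.
# 262144 = 2**18, whose digits 2+6+2+1+4+4 sum to 19.
assert sum_of_digits(262144) == sum_of_digits_recursion(262144) == sum_of_digits_compact(262144) == 19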
def benchmark() -> None:
    '''Time each implementation on progressively larger inputs.'''
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = F"""{func.__name__}({value})"""
        timing = timeit(F"""__main__.{call}""", setup='import __main__')
        print(F"""{call:56} = {func(value)} -- {timing:.4f} seconds""")

    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 673 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : List[Any] ) -> List[Any]:
'''simple docstring'''
for attribute in key.split('.' ):
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
SCREAMING_SNAKE_CASE = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE = value
else:
SCREAMING_SNAKE_CASE = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
SCREAMING_SNAKE_CASE = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
SCREAMING_SNAKE_CASE = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE = name.split(_UpperCamelCase )[0].split('.' )[-2]
SCREAMING_SNAKE_CASE = mapped_key.replace('*' , _UpperCamelCase )
if "weight_g" in name:
SCREAMING_SNAKE_CASE = 'weight_g'
elif "weight_v" in name:
SCREAMING_SNAKE_CASE = 'weight_v'
elif "weight" in name:
SCREAMING_SNAKE_CASE = 'weight'
elif "bias" in name:
SCREAMING_SNAKE_CASE = 'bias'
else:
SCREAMING_SNAKE_CASE = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : int ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = full_name.split('conv_layers.' )[-1]
SCREAMING_SNAKE_CASE = name.split('.' )
SCREAMING_SNAKE_CASE = int(items[0] )
SCREAMING_SNAKE_CASE = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Tuple=None , _UpperCamelCase : Any=True ) -> Tuple:
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE = HubertConfig.from_pretrained(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = HubertConfig()
if is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
SCREAMING_SNAKE_CASE = target_dict.pad_index
SCREAMING_SNAKE_CASE = target_dict.bos_index
SCREAMING_SNAKE_CASE = target_dict.eos_index
SCREAMING_SNAKE_CASE = len(target_dict.symbols )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab.json' )
if not os.path.isdir(_UpperCamelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_UpperCamelCase ) )
return
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , _UpperCamelCase )
SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer(
_UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = True if config.feat_extract_norm == 'layer' else False
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = WavaVecaProcessor(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = HubertForCTC(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = HubertModel(_UpperCamelCase )
if is_finetuned:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
SCREAMING_SNAKE_CASE = model[0].eval()
recursively_load_weights(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
hf_wavavec.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
a_ : Optional[Any] = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
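
# Hedged invocation example (the script filename and all paths are placeholders):
# python <this_script>.py \
#     --checkpoint_path /path/to/hubert_checkpoint.pt \
#     --pytorch_dump_folder_path ./hubert-converted \
#     --not_finetuned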
| 673 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 673 | 1 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    """Wraps a learning-rate scheduler so it only steps when the wrapped
    optimizers actually stepped (e.g. not on gradient-accumulation steps)."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, 'total_steps'):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
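

# Hedged usage sketch (names outside this file are assumptions): how the wrapper
# above is typically constructed around a transformers scheduler.
# from transformers import get_scheduler
# lr_scheduler = get_scheduler("linear", optimizer, num_warmup_steps=0, num_training_steps=1000)
# lr_scheduler = AcceleratedScheduler(lr_scheduler, optimizers=optimizer, step_with_optimizer=True)
# Then call lr_scheduler.step() after optimizer.step() in the training loop; on
# gradient-accumulation steps where the optimizer was skipped it becomes a no-op.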
| 673 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =AudioLDMPipeline
__UpperCamelCase =TEXT_TO_AUDIO_PARAMS
__UpperCamelCase =TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=snake_case__ , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
SCREAMING_SNAKE_CASE = ClapTextModelWithProjection(snake_case__ )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 )
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case__ , )
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : int , snake_case__ : int=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
embeds.append(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'egg cracking'
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ , negative_prompt=snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.016 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.032 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.032
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = ['hey']
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
assert audio_shape == (1, 2_5_6)
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case__ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ )
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Tuple="cpu" , snake_case__ : List[str]=torch.floataa , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = np.random.RandomState(snake_case__ ).standard_normal((1, 8, 1_2_8, 1_6) )
SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 2_5
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[7_7_2_3_0:7_7_2_4_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[2_7_7_8_0:2_7_7_9_0]
SCREAMING_SNAKE_CASE = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
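

# Hedged usage sketch of the pipeline exercised above (needs the cvssp/audioldm
# weights and, realistically, a GPU; the prompt is reused from these tests):
# pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
# audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]
# `audio` is a 1-D numpy waveform at the vocoder's sampling rate.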
| 673 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 52_4288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
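

# Hedged round-trip example (downloads the sentencepiece model from the Hub):
# tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
# pieces = tok.tokenize("Hello world")           # sentencepiece pieces
# text = tok.convert_tokens_to_string(pieces)    # -> 'Hello world'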
| 673 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase :
def __init__( self : Dict , snake_case__ : str , snake_case__ : str=1_3 , snake_case__ : Tuple=7 , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : List[str]=False , snake_case__ : Any=True , snake_case__ : Union[str, Any]=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Optional[Any]=5 , snake_case__ : Optional[Any]=4 , snake_case__ : Union[str, Any]=3_7 , snake_case__ : Tuple="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : int=4 , snake_case__ : List[str]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCamelCase =(OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase =(
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 1_0] , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
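

# Hedged illustration of the rope_scaling dict exercised by the parameterized
# test above (keys and values follow the test body):
# config.rope_scaling = {"type": "linear", "factor": 10.0}   # positions interpolated everywhere
# config.rope_scaling = {"type": "dynamic", "factor": 10.0}  # only rescales past the trained length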
| 673 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    '''Parse the launcher's own arguments plus the wrapped script's arguments.'''
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).')
    # positional
    parser.add_argument(
        'training_script',
        type=str,
        help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ),
    )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
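
# Hedged invocation example (script path and arguments are placeholders):
# python xla_spawn.py --num_cores 8 path/to/train_script.py --learning_rate 3e-5
# The launcher appends `--tpu_num_cores <n>` to the wrapped script's argv, so the
# training script must accept that flag.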
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors='pt').input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
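

# Hedged sketch of the encode -> forward -> decode flow implemented above, using
# the processor/model directly (`audio` as a 1-D float waveform at 16 kHz is an
# assumption):
# processor = WhisperProcessor.from_pretrained("openai/whisper-base")
# model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")
# feats = processor(audio, sampling_rate=16000, return_tensors="pt").input_features
# tokens = model.generate(inputs=feats)
# text = processor.batch_decode(tokens, skip_special_tokens=True)[0]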
| 673 | 1 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    '''Close or mark as stale issues that have gone quiet.'''
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/transformers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 673 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
_CITATION = "\\n@inproceedings{banarjee2005,\n    title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n    author = {Banerjee, Satanjeev  and Lavie, Alon},\n    booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n    month = jun,\n    year = {2005},\n    address = {Ann Arbor, Michigan},\n    publisher = {Association for Computational Linguistics},\n    url = {https://www.aclweb.org/anthology/W05-0909},\n    pages = {65--72},\n}\n"
_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    'meteor': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric('meteor')\n    >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n    >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results[\"meteor\"], 4))\n    0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
    def _info( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
    def _download_and_prepare( self : Dict , dl_manager : int ):
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
    def _compute( self : Union[str, Any] , predictions : List[str] , references : List[str] , alpha : float=0.9 , beta : int=3 , gamma : float=0.5 ):
        """Compute METEOR for each prediction/reference pair and return the mean."""
        if NLTK_VERSION >= version.Version('3.6.5' ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        return {"meteor": np.mean(scores )}
| 673 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Dict = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class UpperCamelCase ( PretrainedConfig ):
__UpperCamelCase ="van"
    def __init__( self : Optional[Any] , image_size : int=2_2_4 , num_channels : int=3 , patch_sizes : List[int]=[7, 3, 3, 3] , strides : List[int]=[4, 2, 2, 2] , hidden_sizes : List[int]=[6_4, 1_2_8, 3_2_0, 5_1_2] , depths : List[int]=[3, 3, 1_2, 3] , mlp_ratios : List[int]=[8, 8, 4, 4] , hidden_act : str="gelu" , initializer_range : float=0.02 , layer_norm_eps : float=1E-6 , layer_scale_init_value : float=1E-2 , drop_path_rate : float=0.0 , dropout_rate : float=0.0 , **kwargs : Any , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
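# A hedged usage sketch (added): constructing the config above and checking two
# of the signature defaults.
#
#     config = UpperCamelCase(hidden_sizes=[3_2, 6_4, 1_6_0, 2_5_6])
#     assert config.depths == [3, 3, 1_2, 3] and config.hidden_sizes[-1] == 2_5_6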
| 673 |
import numpy as np
def sigmoid(vector : np.ndarray ) -> np.ndarray:
    '''Element-wise logistic sigmoid.

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    '''
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit(vector : np.ndarray ) -> np.ndarray:
    '''Sigmoid Linear Unit (SiLU / swish): x * sigmoid(x).'''
    return vector * sigmoid(vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
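    # Added sanity check: swish is ~0 for large negative inputs and close to the
    # identity for large positive ones.
    print(sigmoid_linear_unit(np.array([-5.0, 5.0] ) ))  # ~[-0.0335, 4.9665]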
| 673 | 1 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_(state_dict : dict ) -> None:
    '''Drop fairseq-only keys from the checkpoint state dict in place.'''
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_keys(s_dict : dict ) -> None:
    '''Map fairseq module names onto their transformers counterparts in place.'''
    keys = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace('transformer_layers' , 'layers' )] = s_dict.pop(key )
        elif "subsample" in key:
            s_dict[key.replace('subsample' , 'conv' )] = s_dict.pop(key )
def make_linear_from_emb(emb : nn.Embedding ) -> nn.Linear:
    '''Build an lm_head linear layer that shares its weights with `emb`.'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path : str , pytorch_dump_folder_path : str ) -> None:
    '''Convert a fairseq Speech2Text checkpoint into a transformers checkpoint.'''
    mam_aaa = torch.load(checkpoint_path , map_location='cpu' )
    args = mam_aaa['args']
    state_dict = mam_aaa['model']
    lm_head_weights = state_dict['decoder.output_projection.weight']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i ) for i in args.conv_kernel_sizes.split(',' )]
    config = SpeechaTextConfig(
        vocab_size=vocab_size , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , num_conv_layers=len(conv_kernel_sizes ) , conv_channels=args.conv_channels , conv_kernel_sizes=conv_kernel_sizes , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=tie_embeds , num_beams=5 , max_length=2_00 , use_cache=True , decoder_start_token_id=2 , early_stopping=True , )
    model = SpeechaTextForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
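# Example invocation (added; both paths below are placeholders, not files that
# ship with this script):
#
#     python convert_speech_to_text.py --fairseq_path /path/to/s2t_checkpoint.pt \
#         --pytorch_dump_folder_path ./s2t-converted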
| 673 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Dict = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class UpperCamelCase ( PretrainedConfig ):
__UpperCamelCase ="van"
    def __init__( self : Optional[Any] , image_size : int=2_2_4 , num_channels : int=3 , patch_sizes : List[int]=[7, 3, 3, 3] , strides : List[int]=[4, 2, 2, 2] , hidden_sizes : List[int]=[6_4, 1_2_8, 3_2_0, 5_1_2] , depths : List[int]=[3, 3, 1_2, 3] , mlp_ratios : List[int]=[8, 8, 4, 4] , hidden_act : str="gelu" , initializer_range : float=0.02 , layer_norm_eps : float=1E-6 , layer_scale_init_value : float=1E-2 , drop_path_rate : float=0.0 , dropout_rate : float=0.0 , **kwargs : Any , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 673 | 1 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
a_ : Optional[Any] = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    verbose_logging: Optional[bool] = field(
        default=False , metadata={"help": "Whether to log verbose messages or not."} , )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995 , metadata={"help": "Decay of gumbel temperature during training."} )
def __lowerCAmelCase ( _UpperCamelCase : ModelArguments , _UpperCamelCase : TrainingArguments ) -> Any:
'''simple docstring'''
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank ):
        logging_level = logging.INFO
    logger.setLevel(logging_level )
@dataclass
class DataTrainingArguments :
    dataset_name: str = field(
        default=None , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    validation_split_name: Optional[str] = field(
        default="validation" , metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        } , )
    speech_file_column: Optional[str] = field(
        default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    validation_split_percentage: Optional[int] = field(
        default=1 , metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class DataCollatorForWavaVecaPretraining :
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__( self : Tuple , features : List[Dict[str, Union[List[int], torch.Tensor]]] ):
        """Pad the inputs and sample the masked time indices for pretraining."""
        batch = self.feature_extractor.pad(
            features , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1] )
        batch_size = batch['input_values'].shape[0]
        attention_mask = None  # stays None when the extractor returns no mask (added guard)
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1 ) ).to(
                torch.long )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['input_values'].device )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0] , device=attention_mask.device ), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        batch['mask_time_indices'] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=attention_mask , min_masks=2 , )
        return batch
class WavaVecaPreTrainer ( Trainer ):
    def __init__( self : Union[str, Any] , *args : List[Any] , max_gumbel_temp : Union[str, Any]=1 , min_gumbel_temp : Any=0 , gumbel_temp_decay : Any=1.0 , **kwargs : Dict ):
        """Trainer that decays the gumbel softmax temperature after each update."""
        super().__init__(*args , **kwargs )
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step( self : Tuple , model : nn.Module , inputs : Dict[str, Union[torch.Tensor, Any]] ):
        """Run one training step and decay the gumbel temperature."""
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['mask_time_indices']).sum()
            else:
                raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
        return loss.detach()
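# A standalone sketch (added) of the gumbel-temperature schedule the trainer
# applies: geometric decay per update step, clipped at a floor. The defaults
# mirror the ModelArguments fields above (max 2.0, min 0.5, decay 0.999995).
def _gumbel_temperature(step : int , max_t : float=2.0 , min_t : float=0.5 , decay : float=0.999995 ) -> float:
    return max(max_t * decay**step , min_t )
assert _gumbel_temperature(0 ) == 2.0
assert _gumbel_temperature(10**7 ) == 0.5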
def __lowerCAmelCase ( ) -> str:
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args , training_args )
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets['validation'] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
        datasets['train'] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets['validation'] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split='validation' , cache_dir=model_args.cache_dir , )
        datasets['train'] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=f"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=True )
    def prepare_dataset(batch : str ):
        # check that all files have the correct sampling rate
        batch['speech'] , _ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
        return batch
    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['train'].column_names )
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data : len(data['speech'] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
    def normalize(batch : Tuple ):
        return feature_extractor(batch['speech'] , sampling_rate=feature_extractor.sampling_rate )
    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['train'].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
            ' ``config.feat_extract_norm=\'layer\'' )
    model = WavaVecaForPreTraining(config )
    data_collator = DataCollatorForWavaVecaPretraining(model=model , feature_extractor=feature_extractor )
    trainer = WavaVecaPreTrainer(
        model=model , data_collator=data_collator , args=training_args , train_dataset=vectorized_datasets['train'] , eval_dataset=vectorized_datasets['validation'] , tokenizer=feature_extractor , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()
if __name__ == "__main__":
    __lowerCAmelCase()
| 673 |
def __lowerCAmelCase ( sentence : str , ngram_size : int ) -> list[str]:
    '''Return all character-level n-grams of `sentence` with length `ngram_size`.'''
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
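    # Added usage example: character bigrams, and the same slicing idea applied
    # to a token list for word-level n-grams.
    assert __lowerCAmelCase('abcd' , 2 ) == ['ab', 'bc', 'cd']
    words = 'the quick brown fox'.split()
    print([words[i : i + 2] for i in range(len(words ) - 1 )])  # word bigrams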
| 673 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ : Any = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Any = ["YolosFeatureExtractor"]
a_ : str = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
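# Hedged illustration (added) of what the lazy structure above buys: importing
# the package stays cheap, and a heavy submodule is only imported on first
# attribute access.
#
#     import transformers.models.yolos as yolos   # fast; nothing heavy yet
#     config_cls = yolos.YolosConfig              # triggers the real import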
| 673 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class UpperCamelCase ( ChunkPipeline ):
def __init__( self : Any , **snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )
    def __call__( self : List[Any] , image : Union[str, "Image.Image", List[Dict[str, Any]]] , candidate_labels : Union[str, List[str]] = None , **kwargs : Union[str, Any] , ):
        """simple docstring"""
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('text_queries' )
        if isinstance(image , (str, Image.Image) ):
            inputs = {'image': image, 'candidate_labels': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def _sanitize_parameters( self : Union[str, Any] , **kwargs : Optional[Any] ):
"""simple docstring"""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['threshold'] = kwargs['threshold']
        if "top_k" in kwargs:
            postprocess_params['top_k'] = kwargs['top_k']
return {}, {}, postprocess_params
    def preprocess( self : List[Any] , inputs : Optional[int] ):
        """simple docstring"""
        image = load_image(inputs['image'] )
        candidate_labels = inputs['candidate_labels']
        if isinstance(candidate_labels , str ):
            candidate_labels = candidate_labels.split(',' )
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
            image_features = self.image_processor(image , return_tensors=self.framework )
            yield {
                "is_last": i == len(candidate_labels ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward( self : Any , model_inputs : Dict ):
        """simple docstring"""
        target_size = model_inputs.pop('target_size' )
        candidate_label = model_inputs.pop('candidate_label' )
        is_last = model_inputs.pop('is_last' )
        outputs = self.model(**model_inputs )
        model_outputs = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
    def postprocess( self : Union[str, Any] , model_outputs : Tuple , threshold : float=0.1 , top_k : Union[str, Any]=None ):
        """simple docstring"""
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output['target_size'] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0] )
                result = {'score': score, 'label': label, 'box': box}
                results.append(result )
        results = sorted(results , key=lambda x : x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]
        return results
def UpperCamelCase ( self : List[Any] , snake_case__ : "torch.Tensor" ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
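# A hedged end-to-end sketch (added) using the high-level pipeline factory with
# a typical zero-shot detection checkpoint (google/owlvit-base-patch32):
#
#     from transformers import pipeline
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]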
| 673 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a_ : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =XLMRobertaTokenizer
__UpperCamelCase =XLMRobertaTokenizerFast
__UpperCamelCase =True
__UpperCamelCase =True
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(snake_case__ , keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '<pad>'
SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case__ ) , 1_0_0_2 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(snake_case__ , keep_accents=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(snake_case__ )
# Checks it save with the same files
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(snake_case__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
@cached_property
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def UpperCamelCase ( self : int ):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(snake_case__ , f.name )
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer(f.name , keep_accents=snake_case__ )
SCREAMING_SNAKE_CASE = pickle.dumps(snake_case__ )
pickle.loads(snake_case__ )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE = tokenizer.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = tokenizer.encode(snake_case__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
@slow
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'Hello World!'
SCREAMING_SNAKE_CASE = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) )
@slow
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
SCREAMING_SNAKE_CASE = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) )
@slow
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {'input_ids': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
| 673 |
def solution(power : int = 10_00 ) -> int:
    '''Return the sum of the decimal digits of 2**power.

    >>> solution(15)
    26
    '''
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
| 673 | 1 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case__ , 'width_multiplier' ) )
class UpperCamelCase :
def __init__( self : Any , snake_case__ : List[Any] , snake_case__ : str=1_3 , snake_case__ : List[Any]=6_4 , snake_case__ : Tuple=2 , snake_case__ : List[Any]=3 , snake_case__ : List[Any]="swish" , snake_case__ : Dict=3 , snake_case__ : Union[str, Any]=3_2 , snake_case__ : Tuple=0.1 , snake_case__ : Union[str, Any]=0.02 , snake_case__ : Optional[Any]=True , snake_case__ : int=True , snake_case__ : int=1_0 , snake_case__ : str=None , snake_case__ : Optional[Any]=0.25 , snake_case__ : Optional[int]=0.0 , snake_case__ : Tuple=0.0 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = make_divisible(5_1_2 * width_multiplier , divisor=8 )
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = conv_kernel_size
SCREAMING_SNAKE_CASE = output_stride
SCREAMING_SNAKE_CASE = classifier_dropout_prob
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = width_multiplier
SCREAMING_SNAKE_CASE = ffn_dropout
SCREAMING_SNAKE_CASE = attn_dropout
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileViTVaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase ( self : Dict , snake_case__ : Dict , snake_case__ : str , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = MobileViTVaForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : str , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = MobileViTVaForSemanticSegmentation(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
SCREAMING_SNAKE_CASE = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileViTVaModelTester(self )
SCREAMING_SNAKE_CASE = MobileViTVaConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def UpperCamelCase ( self : str ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
pass
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Tuple ):
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = 5
self.assertEqual(len(snake_case__ ) , snake_case__ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE = 2
for i in range(len(snake_case__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
@slow
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = MobileViTVaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def __lowerCAmelCase ( ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
snake_case__ )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**snake_case__ )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
SCREAMING_SNAKE_CASE = model.to(snake_case__ )
SCREAMING_SNAKE_CASE = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**snake_case__ )
SCREAMING_SNAKE_CASE = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=snake_case__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
SCREAMING_SNAKE_CASE = model.to(snake_case__ )
SCREAMING_SNAKE_CASE = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**snake_case__ )
SCREAMING_SNAKE_CASE = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0, 6_0)] )
SCREAMING_SNAKE_CASE = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , snake_case__ )
SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
SCREAMING_SNAKE_CASE = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , snake_case__ )
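# Hedged sketch (added) of the make_divisible helper imported above: it rounds a
# channel count to a multiple of `divisor`, MobileNet-style, staying close to
# the requested value. The concrete equality below is an assumption, not a test
# from the original suite.
#
#     assert make_divisible(5_1_2 * 0.25 , divisor=8 ) == 1_2_8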
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase ( PipelineTool ):
__UpperCamelCase ="facebook/bart-large-mnli"
__UpperCamelCase =(
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__UpperCamelCase ="text_classifier"
__UpperCamelCase =AutoTokenizer
__UpperCamelCase =AutoModelForSequenceClassification
__UpperCamelCase =["text", ["text"]]
__UpperCamelCase =["text"]
    def setup( self : Optional[Any] ):
"""simple docstring"""
super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.idalabel.items():
            if label.lower().startswith('entail' ):
                self.entailment_id = int(idx )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
    def encode( self : Optional[Any] , text : List[str] , labels : Dict ):
        """simple docstring"""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F"""This example is {label}""" for label in labels] , return_tensors='pt' , padding='max_length' , )
    def decode( self : Dict , outputs : Dict ):
        """simple docstring"""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
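# A hedged usage sketch (added), assuming the PipelineTool base chains
# setup -> encode -> forward -> decode when the tool is called:
#
#     classifier = UpperCamelCase()                 # loads facebook/bart-large-mnli
#     classifier("This movie was fantastic" , labels=["positive", "negative"])
#     # -> "positive"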
| 673 | 1 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class UpperCamelCase :
def __init__( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.elements ) == 0
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(snake_case__ )
else:
# update
# print("update", item)
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
if item in self.set:
self.set.remove(snake_case__ )
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
return self.elements[0][1]
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
self.set.remove(snake_case__ )
return (priority, item)
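# Editor's note: the queue above implements update/remove by rebuild: entries
# are popped into a temporary list until the target item is found, then pushed
# back. Each update is O(n log n), which is acceptable for the small grids
# this demo searches.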
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
return np.linalg.norm(a - b )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Dict:
'''simple docstring'''
return consistent_heuristic(_UpperCamelCase , _UpperCamelCase ) // t
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[int]:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
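# Editor's worked example for the three heuristics above, with p = (0, 0),
# goal = (19, 19) and t = 1 (its initial value); values rounded:
#   Euclidean (consistent): sqrt(19**2 + 19**2) ≈ 26.870
#   scaled variant:         26.870 // 1 = 26.0
#   Manhattan:              |0 - 19| + |0 - 19| = 38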
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : int , _UpperCamelCase : TPos , _UpperCamelCase : dict[TPos, float] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = g_function[start] + Wa * heuristics[i](_UpperCamelCase , _UpperCamelCase )
return ans
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.chararray((n, n) )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = '*'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (j, (n - 1) - i) in blocks:
SCREAMING_SNAKE_CASE = '#'
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = x
# print(x)
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[x]
SCREAMING_SNAKE_CASE = '-'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
print(_UpperCamelCase , end=' ' )
SCREAMING_SNAKE_CASE = back_pointer[x]
print(_UpperCamelCase )
sys.exit()
def __lowerCAmelCase ( _UpperCamelCase : TPos ) -> Any:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , ) -> List[Any]:
'''simple docstring'''
for itera in range(_UpperCamelCase ):
open_list[itera].remove_element(_UpperCamelCase )
# print("s", s)
# print("j", j)
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = s
SCREAMING_SNAKE_CASE = (x - 1, y)
SCREAMING_SNAKE_CASE = (x + 1, y)
SCREAMING_SNAKE_CASE = (x, y + 1)
SCREAMING_SNAKE_CASE = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = float('inf' )
if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
SCREAMING_SNAKE_CASE = g_function[s] + 1
SCREAMING_SNAKE_CASE = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCamelCase ):
if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ):
open_list[j].put(
_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a_ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ : List[str] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a_ : Union[str, Any] = make_common_ground()
a_ : Tuple = blocks_blk
# hyper parameters
a_ : Any = 1
a_ : List[str] = 1
a_ : Union[str, Any] = 20
a_ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a_ : int = (0, 0)
a_ : Optional[int] = (n - 1, n - 1)
a_ : Union[str, Any] = 1
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos , _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {start: 0, goal: float('inf' )}
SCREAMING_SNAKE_CASE = {start: -1, goal: -1}
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
for i in range(_UpperCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , _UpperCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = open_list[i].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_inad.append(_UpperCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = open_list[0].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_anchor.append(_UpperCamelCase )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 673 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a_ : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a_ : int = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
a_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ : List[Any] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a_ : Any = "allenai"
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict((re.sub(R'@@$' , '' , _UpperCamelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , _UpperCamelCase ), v) for k, v in d.items() )
SCREAMING_SNAKE_CASE = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
SCREAMING_SNAKE_CASE = d[k] # restore
return da
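# Editor's sketch of the key rewrite above (hypothetical vocab entries):
#   {"the": 4, "hous@@": 5, "<s>": 0}  ->  {"the</w>": 4, "hous": 5, "<s>": 0}
# Word-final tokens gain "</w>", "@@" continuation markers are stripped, and
# the four special tokens are restored without the "</w>" suffix.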
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
assert os.path.exists(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE = cls.hub_models()
SCREAMING_SNAKE_CASE = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
SCREAMING_SNAKE_CASE = '.'
    # note: this model dump predates a later fairseq upgrade of its model
    # format, which rewrites and splits the saved weights on load, so we
    # can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
SCREAMING_SNAKE_CASE = hub_utils.from_pretrained(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , archive_map=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vars(chkpt['args']['model'] )
SCREAMING_SNAKE_CASE = args['source_lang']
SCREAMING_SNAKE_CASE = args['target_lang']
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
# dicts
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{src_lang}.txt""" )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{tgt_lang}.txt""" )
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-src.json' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE = False
break
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-tgt.json' )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.exists(_UpperCamelCase ):
break
with open(_UpperCamelCase , encoding='utf-8' ) as fin:
SCREAMING_SNAKE_CASE = fin.read()
SCREAMING_SNAKE_CASE = re.sub(R' \d+$' , '' , _UpperCamelCase , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_UpperCamelCase )
# model config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
SCREAMING_SNAKE_CASE = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE = 5
SCREAMING_SNAKE_CASE = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE = best_score_hparams[model_dir]['length_penalty']
else:
SCREAMING_SNAKE_CASE = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# model
SCREAMING_SNAKE_CASE = chkpt['models'][0]
SCREAMING_SNAKE_CASE = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration(_UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
# save
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_UpperCamelCase , _UpperCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ : int = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
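# Editor's note: a hedged invocation sketch (the script name follows the
# transformers converter convention and the paths are placeholders):
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path /path/to/wmt19-en-de/model.pt \
#       --pytorch_dump_folder_path /path/to/dump/fsmt-wmt19-en-de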
| 673 | 1 |
import argparse
from collections import defaultdict
import yaml
a_ : Tuple = "docs/source/en/_toctree.yml"
def __lowerCAmelCase ( _UpperCamelCase : int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = defaultdict(_UpperCamelCase )
for doc in model_doc:
counts[doc["local"]] += 1
SCREAMING_SNAKE_CASE = [key for key, value in counts.items() if value > 1]
SCREAMING_SNAKE_CASE = []
for duplicate_key in duplicates:
SCREAMING_SNAKE_CASE = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(_UpperCamelCase ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
    return sorted(_UpperCamelCase , key=lambda s : s["title"].lower() )
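# Editor's sketch of the dedup-and-sort behaviour above (hypothetical docs):
#   [{"local": "bert", "title": "BERT"}, {"local": "bert", "title": "BERT"},
#    {"local": "albert", "title": "ALBERT"}]
#   -> [{"local": "albert", "title": "ALBERT"}, {"local": "bert", "title": "BERT"}]
# Duplicate locals collapse to a single entry (their titles must agree), and
# the result is sorted case-insensitively by title.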
def __lowerCAmelCase ( _UpperCamelCase : int=False ) -> List[Any]:
'''simple docstring'''
with open(_UpperCamelCase , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE = yaml.safe_load(f.read() )
# Get to the API doc
SCREAMING_SNAKE_CASE = 0
while content[api_idx]["title"] != "API":
api_idx += 1
SCREAMING_SNAKE_CASE = content[api_idx]['sections']
# Then to the model doc
SCREAMING_SNAKE_CASE = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
SCREAMING_SNAKE_CASE = api_doc[model_idx]['sections']
SCREAMING_SNAKE_CASE = [(idx, section) for idx, section in enumerate(_UpperCamelCase ) if 'sections' in section]
SCREAMING_SNAKE_CASE = False
for idx, modality_doc in modalities_docs:
SCREAMING_SNAKE_CASE = modality_doc['sections']
SCREAMING_SNAKE_CASE = clean_model_doc_toc(_UpperCamelCase )
if old_modality_doc != new_modality_doc:
SCREAMING_SNAKE_CASE = True
if overwrite:
SCREAMING_SNAKE_CASE = new_modality_doc
if diff:
if overwrite:
SCREAMING_SNAKE_CASE = model_doc
SCREAMING_SNAKE_CASE = api_doc
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(_UpperCamelCase , allow_unicode=_UpperCamelCase ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a_ : Any = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 673 |
import random
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : float , _UpperCamelCase : bool = False ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {i: [] for i in range(_UpperCamelCase )}
    # if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(_UpperCamelCase )
    # if probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # if the randomly generated number is lower than the given probability
for i in range(_UpperCamelCase ):
for j in range(i + 1 , _UpperCamelCase ):
if random.random() < probability:
graph[i].append(_UpperCamelCase )
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(_UpperCamelCase )
return graph
def __lowerCAmelCase ( _UpperCamelCase : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(_UpperCamelCase ) if i != j] for i in range(_UpperCamelCase )
}
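# Editor's sketch: the complete-graph helper above, for 3 nodes, returns
#   {0: [1, 2], 1: [0, 2], 2: [0, 1]}
# i.e. every node is adjacent to every other node.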
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 1 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class UpperCamelCase :
__UpperCamelCase =BlenderbotSmallConfig
__UpperCamelCase ={}
__UpperCamelCase ="gelu"
def __init__( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : List[Any]=1_3 , snake_case__ : List[Any]=7 , snake_case__ : str=True , snake_case__ : Optional[Any]=False , snake_case__ : Union[str, Any]=9_9 , snake_case__ : Optional[int]=3_2 , snake_case__ : str=2 , snake_case__ : str=4 , snake_case__ : Dict=3_7 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Optional[int]=2_0 , snake_case__ : int=2 , snake_case__ : str=1 , snake_case__ : Union[str, Any]=0 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = eos_token_id
SCREAMING_SNAKE_CASE = pad_token_id
SCREAMING_SNAKE_CASE = bos_token_id
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE = prepare_blenderbot_small_inputs_dict(snake_case__ , snake_case__ , snake_case__ )
return config, inputs_dict
def UpperCamelCase ( self : Any , snake_case__ : Tuple , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFBlenderbotSmallModel(config=snake_case__ ).get_decoder()
SCREAMING_SNAKE_CASE = inputs_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids[:1, :]
SCREAMING_SNAKE_CASE = inputs_dict['attention_mask'][:1, :]
SCREAMING_SNAKE_CASE = inputs_dict['head_mask']
SCREAMING_SNAKE_CASE = 1
# first forward pass
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , use_cache=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append the new tokens to input_ids and attention_mask
SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )[0]
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1E-3 )
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Dict=None , _UpperCamelCase : Tuple=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , ) -> Union[str, Any]:
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(_UpperCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
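# Editor's sketch of the mask construction above: positions equal to
# pad_token_id get mask 0 and everything else gets 1, e.g. a row
# [5, 7, pad, pad] yields attention_mask [1, 1, 0, 0].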
@require_tf
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__UpperCamelCase =(TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__UpperCamelCase =(
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFBlenderbotSmallModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ )
@require_tokenizers
@require_tf
class UpperCamelCase ( unittest.TestCase ):
__UpperCamelCase =[
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
__UpperCamelCase ="facebook/blenderbot_small-90M"
@cached_property
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
@cached_property
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , return_tensors='tf' )
SCREAMING_SNAKE_CASE = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 673 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any]=1_3 , snake_case__ : Union[str, Any]=7 , snake_case__ : List[str]=True , snake_case__ : Any=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=9_9 , snake_case__ : str=3_2 , snake_case__ : Dict=5 , snake_case__ : str=4 , snake_case__ : int=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : List[Any]=1_6 , snake_case__ : str=2 , snake_case__ : int=0.02 , snake_case__ : List[str]=3 , snake_case__ : Dict=4 , snake_case__ : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
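    # Editor's sketch: unsqueeze(1).expand(-1, num_choices, -1) above tiles
    # each (batch, seq_len) tensor across a new choice axis, for example
    # (13, 7) becomes (13, 4, 7) when num_choices = 4, so every choice scores
    # the same encoded inputs.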
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'the [MASK] of Belgium is Brussels'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) , 'capital' )
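    # Editor's note (assumption: a BERT-style tokenizer that prepends [CLS]):
    # in the masked-LM check above, sequence position 2 is the [MASK] slot,
    # and the argmax over the vocabulary logits at that slot is expected to
    # decode to "capital".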
| 673 | 1 |
from __future__ import annotations
import math
def __lowerCAmelCase ( _UpperCamelCase : int ) -> list[int]:
'''simple docstring'''
if num <= 0:
SCREAMING_SNAKE_CASE = f"""{num}: Invalid input, please enter a positive integer."""
raise ValueError(_UpperCamelCase )
SCREAMING_SNAKE_CASE = [True] * (num + 1)
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = int(math.sqrt(_UpperCamelCase ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(_UpperCamelCase )
            # Mark multiples of start as False
for i in range(start * start , num + 1 , _UpperCamelCase ):
if sieve[i] is True:
SCREAMING_SNAKE_CASE = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(_UpperCamelCase )
return prime
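# Editor's worked example: prime_sieve(25) walks start = 2..5 (int(sqrt(25))),
# collecting [2, 3, 5] while crossing off their multiples, then sweeps the
# remaining range 6..25 for survivors, returning [2, 3, 5, 7, 11, 13, 17, 19, 23].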
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 673 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
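# Editor's note: the try/except above is the standard diffusers lazy-import
# guard: when torch or transformers is missing, dummy placeholder objects are
# exported instead, so importing the package still succeeds and a helpful
# error is raised only when the pipeline is actually instantiated.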
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str = " " ) -> list:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
for index, char in enumerate(_UpperCamelCase ):
if char == separator:
split_words.append(string[last_index:index] )
SCREAMING_SNAKE_CASE = index + 1
elif index + 1 == len(_UpperCamelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
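# Editor's worked example for the splitter above:
#   "a b c" with separator " "  ->  ["a", "b", "c"]
#   "abc"                       ->  ["abc"]
#   ""                          ->  []   (the loop body never runs)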
if __name__ == "__main__":
from doctest import testmod
testmod()
| 673 |
| 673 | 1 |
import random
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = a[left_index]
SCREAMING_SNAKE_CASE = left_index + 1
for j in range(left_index + 1 , _UpperCamelCase ):
if a[j] < pivot:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = a[i], a[j]
i += 1
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = a[i - 1], a[left_index]
return i - 1
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : int , _UpperCamelCase : str ) -> str:
'''simple docstring'''
if left < right:
SCREAMING_SNAKE_CASE = random.randint(_UpperCamelCase , right - 1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
SCREAMING_SNAKE_CASE = partition(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
quick_sort_random(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
_UpperCamelCase , pivot_index + 1 , _UpperCamelCase ) # recursive quicksort to the right of the pivot point
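# Editor's note: `right` is an exclusive bound (main() below passes len(a)),
# and the random pivot is swapped to the left edge before partitioning; e.g.
# [5, 3, 8, 1] is sorted in place to [1, 3, 5, 8] regardless of pivot choice.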
def __lowerCAmelCase ( ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = input('Enter numbers separated by a comma:\n' ).strip()
SCREAMING_SNAKE_CASE = [int(_UpperCamelCase ) for item in user_input.split(',' )]
quick_sort_random(_UpperCamelCase , 0 , len(_UpperCamelCase ) )
print(_UpperCamelCase )
if __name__ == "__main__":
main()
| 673 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : str = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Any=8 ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
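# Editor's worked example: the helper above maps pixel sizes to latent sizes,
# rounding up so the latent grid covers the whole image. With scale_factor=8:
#   height 768 -> (768 // 64) * 8 = 96
#   height 500 -> (500 // 64 + 1) * 8 = 64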
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=5_12 , _UpperCamelCase : Union[str, Any]=5_12 ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
SCREAMING_SNAKE_CASE = np.array(pil_image.convert('RGB' ) )
SCREAMING_SNAKE_CASE = arr.astype(np.floataa ) / 1_27.5 - 1
SCREAMING_SNAKE_CASE = np.transpose(_UpperCamelCase , [2, 0, 1] )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
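# A self-contained sketch of the latent-size helper defined above (named
# `downscale_height_and_width` in the upstream Kandinsky pipeline; obfuscated
# to `__lowerCAmelCase` here). Pixel sizes are ceil-divided by scale_factor**2
# and re-multiplied by scale_factor, so 512x512 pixels map to a 64x64 latent
# grid with the default movq factor of 8, and odd sizes are rounded up.
def _downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    new_height = height // scale_factor**2 + (height % scale_factor**2 != 0)
    new_width = width // scale_factor**2 + (width % scale_factor**2 != 0)
    return new_height * scale_factor, new_width * scale_factor


assert _downscale_height_and_width(512, 512) == (64, 64)
assert _downscale_height_and_width(700, 700) == (88, 88)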
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : int , snake_case__ : UNetaDConditionModel , snake_case__ : DDPMScheduler , snake_case__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=snake_case__ , scheduler=snake_case__ , movq=snake_case__ , )
SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase ( self : Any , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = min(int(num_inference_steps * strength ) , snake_case__ )
SCREAMING_SNAKE_CASE = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase ( self : List[str] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str=None ):
"""simple docstring"""
if not isinstance(snake_case__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case__ )}""" )
SCREAMING_SNAKE_CASE = image.to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE = image
else:
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case__ )
]
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
else:
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ ).latent_dist.sample(snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE = init_latents.shape
SCREAMING_SNAKE_CASE = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ )
# get latents
SCREAMING_SNAKE_CASE = self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = init_latents
return latents
def UpperCamelCase ( self : int , snake_case__ : List[str]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
SCREAMING_SNAKE_CASE = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=snake_case__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cpu_offload_with_hook(snake_case__ , snake_case__ , prev_module_hook=snake_case__ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case__ )
def __call__( self : str , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : int = 5_1_2 , snake_case__ : int = 5_1_2 , snake_case__ : int = 1_0_0 , snake_case__ : float = 4.0 , snake_case__ : float = 0.3 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._execution_device
SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = image_embeds.shape[0]
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [image]
if not all(isinstance(snake_case__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(snake_case__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
SCREAMING_SNAKE_CASE = torch.cat([prepare_image(snake_case__ , snake_case__ , snake_case__ ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE = image.to(dtype=image_embeds.dtype , device=snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ )['latents']
SCREAMING_SNAKE_CASE = latents.repeat_interleave(snake_case__ , dim=0 )
self.scheduler.set_timesteps(snake_case__ , device=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_timesteps(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor )
SCREAMING_SNAKE_CASE = self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , image_embeds.dtype , snake_case__ , snake_case__ )
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE = {'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE = self.unet(
sample=snake_case__ , timestep=snake_case__ , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE = self.scheduler.step(
snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ , )[0]
# post-processing
SCREAMING_SNAKE_CASE = self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
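# A minimal sketch of the guidance arithmetic inside `__call__` above: when the
# UNet predicts a learned variance, its output is split channel-wise into
# (noise, variance); classifier-free guidance mixes only the noise halves of
# the stacked [unconditional, text] batch, and the text-side variance is
# re-attached afterwards. The shapes below are illustrative.
import torch

latent_channels = 4
noise_pred = torch.randn(2, latent_channels * 2, 64, 64)  # [uncond, text] stacked
noise, variance = noise_pred.split(latent_channels, dim=1)
noise_uncond, noise_text = noise.chunk(2)
_, variance_text = variance.chunk(2)
guidance_scale = 4.0
guided = noise_uncond + guidance_scale * (noise_text - noise_uncond)
guided = torch.cat([guided, variance_text], dim=1)
assert guided.shape == (1, latent_channels * 2, 64, 64)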
| 673 | 1 |
import numpy as np
def __lowerCAmelCase ( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def __lowerCAmelCase ( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
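# A quick numeric check of the two activations above: sigmoid(0) = 0.5, and the
# sigmoid-weighted linear unit (SiLU/swish) satisfies silu(x) = x * sigmoid(x).
import numpy as np

x = np.array([-1.0, 0.0, 1.0])
s = 1 / (1 + np.exp(-x))
assert np.isclose(s[1], 0.5)
assert np.allclose(x * s, [-0.26894142, 0.0, 0.73105858])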
| 673 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger("transformers.models.speecht5")
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None , ) -> Tuple:
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig.from_pretrained(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = np.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
model.save_pretrained(_UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
a_ : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
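# A small sketch of why the converter brackets loading with apply_weight_norm()
# and remove_weight_norm(): weight normalization reparameterizes a conv weight
# as w = g * v / ||v||, so the checkpoint stores `weight_g`/`weight_v` instead
# of `weight`. Applying weight norm first makes those parameter names exist on
# the HF model; removing it afterwards folds them back into a plain weight.
import torch
from torch.nn.utils import remove_weight_norm, weight_norm

conv = weight_norm(torch.nn.Conv1d(1, 1, 3))
assert {"weight_g", "weight_v", "bias"} <= set(dict(conv.named_parameters()))
remove_weight_norm(conv)
assert "weight" in dict(conv.named_parameters())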
| 673 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
a_ : Dict = logging.get_logger(__name__)
# General docstring
a_ : int = "RegNetConfig"
# Base docstring
a_ : List[str] = "facebook/regnet-y-040"
a_ : Dict = [1, 1088, 7, 7]
# Image classification docstring
a_ : Optional[Any] = "facebook/regnet-y-040"
a_ : int = "tabby, tabby cat"
a_ : List[str] = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self : Tuple , snake_case__ : int , snake_case__ : int = 3 , snake_case__ : int = 1 , snake_case__ : int = 1 , snake_case__ : Optional[str] = "relu" , **snake_case__ : Optional[int] , ):
"""simple docstring"""
super().__init__(**snake_case__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
SCREAMING_SNAKE_CASE = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
SCREAMING_SNAKE_CASE = tf.keras.layers.ConvaD(
filters=snake_case__ , kernel_size=snake_case__ , strides=snake_case__ , padding='VALID' , groups=snake_case__ , use_bias=snake_case__ , name='convolution' , )
SCREAMING_SNAKE_CASE = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
SCREAMING_SNAKE_CASE = ACTaFN[activation] if activation is not None else tf.identity
def UpperCamelCase ( self : int , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.convolution(self.padding(snake_case__ ) )
SCREAMING_SNAKE_CASE = self.normalization(snake_case__ )
SCREAMING_SNAKE_CASE = self.activation(snake_case__ )
return hidden_state
class UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self : Dict , snake_case__ : RegNetConfig , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = config.num_channels
SCREAMING_SNAKE_CASE = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def UpperCamelCase ( self : Tuple , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = shape_list(snake_case__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values matches the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
SCREAMING_SNAKE_CASE = tf.transpose(snake_case__ , perm=(0, 2, 3, 1) )
SCREAMING_SNAKE_CASE = self.embedder(snake_case__ )
return hidden_state
class UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self : str , snake_case__ : int , snake_case__ : int = 2 , **snake_case__ : Tuple ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = tf.keras.layers.ConvaD(
filters=snake_case__ , kernel_size=1 , strides=snake_case__ , use_bias=snake_case__ , name='convolution' )
SCREAMING_SNAKE_CASE = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def UpperCamelCase ( self : List[Any] , snake_case__ : tf.Tensor , snake_case__ : bool = False ):
"""simple docstring"""
return self.normalization(self.convolution(snake_case__ ) , training=snake_case__ )
class UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self : Tuple , snake_case__ : int , snake_case__ : int , **snake_case__ : List[Any] ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' )
SCREAMING_SNAKE_CASE = [
tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=snake_case__ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.pooler(snake_case__ )
for layer_module in self.attention:
SCREAMING_SNAKE_CASE = layer_module(snake_case__ )
SCREAMING_SNAKE_CASE = hidden_state * pooled
return hidden_state
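# A minimal NHWC sketch of the squeeze-and-excitation block above: global
# average pooling squeezes each channel to a scalar, two 1x1 convolutions
# (ReLU, then sigmoid) produce per-channel gates in (0, 1), and the input is
# rescaled channel-wise. The channel counts are illustrative.
import tensorflow as tf

x = tf.random.normal((1, 7, 7, 32))
pooled = tf.keras.layers.GlobalAveragePooling2D(keepdims=True)(x)  # (1, 1, 1, 32)
gate = tf.keras.layers.Conv2D(8, 1, activation="relu")(pooled)     # squeeze
gate = tf.keras.layers.Conv2D(32, 1, activation="sigmoid")(gate)   # excite
y = x * gate  # gates broadcast over the spatial grid
assert y.shape == x.shape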
class UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] , snake_case__ : RegNetConfig , snake_case__ : int , snake_case__ : int , snake_case__ : int = 1 , **snake_case__ : Dict ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE = max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE = (
TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
SCREAMING_SNAKE_CASE = [
TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='layer.2' ),
]
SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
def UpperCamelCase ( self : Optional[int] , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE = layer_module(snake_case__ )
SCREAMING_SNAKE_CASE = self.shortcut(snake_case__ )
hidden_state += residual
SCREAMING_SNAKE_CASE = self.activation(snake_case__ )
return hidden_state
class UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self : int , snake_case__ : RegNetConfig , snake_case__ : int , snake_case__ : int , snake_case__ : int = 1 , **snake_case__ : Any ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE = max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE = (
TFRegNetShortCut(snake_case__ , stride=snake_case__ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
SCREAMING_SNAKE_CASE = [
TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
snake_case__ , stride=snake_case__ , groups=snake_case__ , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(snake_case__ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(snake_case__ , kernel_size=1 , activation=snake_case__ , name='layer.3' ),
]
SCREAMING_SNAKE_CASE = ACTaFN[config.hidden_act]
def UpperCamelCase ( self : Optional[int] , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE = layer_module(snake_case__ )
SCREAMING_SNAKE_CASE = self.shortcut(snake_case__ )
hidden_state += residual
SCREAMING_SNAKE_CASE = self.activation(snake_case__ )
return hidden_state
class UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self : Optional[Any] , snake_case__ : RegNetConfig , snake_case__ : int , snake_case__ : int , snake_case__ : int = 2 , snake_case__ : int = 2 , **snake_case__ : Optional[Any] ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
SCREAMING_SNAKE_CASE = [
# downsampling is done in the first layer with stride of 2
layer(snake_case__ , snake_case__ , snake_case__ , stride=snake_case__ , name='layers.0' ),
*[layer(snake_case__ , snake_case__ , snake_case__ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def UpperCamelCase ( self : List[str] , snake_case__ : Tuple ):
"""simple docstring"""
for layer_module in self.layers:
SCREAMING_SNAKE_CASE = layer_module(snake_case__ )
return hidden_state
class UpperCamelCase ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , snake_case__ : RegNetConfig , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
snake_case__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
SCREAMING_SNAKE_CASE = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(snake_case__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(snake_case__ , snake_case__ , snake_case__ , depth=snake_case__ , name=F"""stages.{i+1}""" ) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : tf.Tensor , snake_case__ : bool = False , snake_case__ : bool = True ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE = stage_module(snake_case__ )
if output_hidden_states:
SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ )
@keras_serializable
class UpperCamelCase ( tf.keras.layers.Layer ):
__UpperCamelCase =RegNetConfig
def __init__( self : Optional[int] , snake_case__ : Dict , **snake_case__ : List[str] ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = config
SCREAMING_SNAKE_CASE = TFRegNetEmbeddings(snake_case__ , name='embedder' )
SCREAMING_SNAKE_CASE = TFRegNetEncoder(snake_case__ , name='encoder' )
SCREAMING_SNAKE_CASE = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case__ , name='pooler' )
@unpack_inputs
def UpperCamelCase ( self : Optional[Any] , snake_case__ : tf.Tensor , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE = self.embedder(snake_case__ , training=snake_case__ )
SCREAMING_SNAKE_CASE = self.encoder(
snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ )
SCREAMING_SNAKE_CASE = encoder_outputs[0]
SCREAMING_SNAKE_CASE = self.pooler(snake_case__ )
        # Change to NCHW output format to have uniformity in the modules
SCREAMING_SNAKE_CASE = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) )
SCREAMING_SNAKE_CASE = tf.transpose(snake_case__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
SCREAMING_SNAKE_CASE = tuple([tf.transpose(snake_case__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=snake_case__ , pooler_output=snake_case__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
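# A small sketch of the layout convention used throughout this port: inputs
# arrive as NCHW (to match the PyTorch reference), are transposed to NHWC for
# tf.keras.layers.Conv2D (which does not support NCHW on CPU), and outputs are
# transposed back so both frameworks expose identical tensor shapes.
import tensorflow as tf

pixel_values = tf.random.normal((2, 3, 224, 224))     # NCHW, PyTorch-style
nhwc = tf.transpose(pixel_values, perm=(0, 2, 3, 1))  # -> (2, 224, 224, 3)
nchw = tf.transpose(nhwc, perm=(0, 3, 1, 2))          # back to (2, 3, 224, 224)
assert nchw.shape == pixel_values.shape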
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =RegNetConfig
__UpperCamelCase ="regnet"
__UpperCamelCase ="pixel_values"
@property
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
a_ : Optional[Any] = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
a_ : Dict = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , SCREAMING_SNAKE_CASE , )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Union[str, Any] , snake_case__ : RegNetConfig , *snake_case__ : Any , **snake_case__ : Any ):
"""simple docstring"""
super().__init__(snake_case__ , *snake_case__ , **snake_case__ )
SCREAMING_SNAKE_CASE = TFRegNetMainLayer(snake_case__ , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(snake_case__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self : Dict , snake_case__ : tf.Tensor , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , snake_case__ : Any=False , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE = self.regnet(
pixel_values=snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , SCREAMING_SNAKE_CASE , )
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def __init__( self : str , snake_case__ : RegNetConfig , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
super().__init__(snake_case__ , *snake_case__ , **snake_case__ )
SCREAMING_SNAKE_CASE = config.num_labels
SCREAMING_SNAKE_CASE = TFRegNetMainLayer(snake_case__ , name='regnet' )
# classification head
SCREAMING_SNAKE_CASE = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(snake_case__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self : int , snake_case__ : tf.Tensor = None , snake_case__ : tf.Tensor = None , snake_case__ : bool = None , snake_case__ : bool = None , snake_case__ : Dict=False , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE = self.regnet(
snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , training=snake_case__ )
SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE = self.classifier[0](snake_case__ )
SCREAMING_SNAKE_CASE = self.classifier[1](snake_case__ )
SCREAMING_SNAKE_CASE = None if labels is None else self.hf_compute_loss(labels=snake_case__ , logits=snake_case__ )
if not return_dict:
SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
| 673 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a_ : List[Any] = {
"allenai/led-base-16384": 1_6384,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =LEDTokenizer
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Dict="replace" , snake_case__ : Tuple="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : List[Any]=False , snake_case__ : int=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as the other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs['global_attention_mask'] ) != len(snake_case__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(snake_case__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
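# A small illustration of the LED-specific padding above: `global_attention_mask`
# must match the padded sequence length, and the missing positions are filled
# with -1 ("local attention") rather than 0, because 0 already means "attend
# locally" in LED's convention. The values here are illustrative.
encoded = {"input_ids": [0, 9, 10, 2, 1, 1], "global_attention_mask": [1, 0, 0, 0]}
difference = len(encoded["input_ids"]) - len(encoded["global_attention_mask"])
encoded["global_attention_mask"] += [-1] * difference  # padding_side == "right"
assert encoded["global_attention_mask"] == [1, 0, 0, 0, -1, -1]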
| 673 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Any = logging.get_logger(__name__)
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : List[str]=False ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
        # for the base model only, strip the "vit" prefix from all keys that start with "vit"
SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : str=False ) -> Optional[int]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE = ''
else:
SCREAMING_SNAKE_CASE = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
SCREAMING_SNAKE_CASE = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :]
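# A minimal sketch of the fused-QKV split performed above: timm stores a single
# (3 * hidden_size, hidden_size) input projection, which is sliced row-wise
# into query, key and value weights for the HF attention module. hidden_size=4
# is illustrative.
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : hidden_size * 2, :]
value = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)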
def __lowerCAmelCase ( _UpperCamelCase : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(_UpperCamelCase , _UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dct.pop(_UpperCamelCase )
SCREAMING_SNAKE_CASE = val
def __lowerCAmelCase ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ViTConfig()
SCREAMING_SNAKE_CASE = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = int(vit_name[-12:-10] )
SCREAMING_SNAKE_CASE = int(vit_name[-9:-6] )
else:
SCREAMING_SNAKE_CASE = 10_00
SCREAMING_SNAKE_CASE = 'huggingface/label-files'
SCREAMING_SNAKE_CASE = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = int(vit_name[-6:-4] )
SCREAMING_SNAKE_CASE = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
SCREAMING_SNAKE_CASE = 1_92
SCREAMING_SNAKE_CASE = 7_68
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = 3
elif vit_name[9:].startswith('small' ):
SCREAMING_SNAKE_CASE = 3_84
SCREAMING_SNAKE_CASE = 15_36
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
SCREAMING_SNAKE_CASE = 7_68
SCREAMING_SNAKE_CASE = 23_04
SCREAMING_SNAKE_CASE = 8
SCREAMING_SNAKE_CASE = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
SCREAMING_SNAKE_CASE = 10_24
SCREAMING_SNAKE_CASE = 40_96
SCREAMING_SNAKE_CASE = 24
SCREAMING_SNAKE_CASE = 16
elif vit_name[4:].startswith('huge' ):
SCREAMING_SNAKE_CASE = 12_80
SCREAMING_SNAKE_CASE = 51_20
SCREAMING_SNAKE_CASE = 32
SCREAMING_SNAKE_CASE = 16
# load original model from timm
SCREAMING_SNAKE_CASE = timm.create_model(_UpperCamelCase , pretrained=_UpperCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE = timm_model.state_dict()
if base_model:
remove_classification_head_(_UpperCamelCase )
SCREAMING_SNAKE_CASE = create_rename_keys(_UpperCamelCase , _UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_q_k_v(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
SCREAMING_SNAKE_CASE = ViTModel(_UpperCamelCase ).eval()
else:
SCREAMING_SNAKE_CASE = ViTForImageClassification(_UpperCamelCase ).eval()
model.load_state_dict(_UpperCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
SCREAMING_SNAKE_CASE = DeiTImageProcessor(size=config.image_size )
else:
SCREAMING_SNAKE_CASE = ViTImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors='pt' )
SCREAMING_SNAKE_CASE = encoding['pixel_values']
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
if base_model:
SCREAMING_SNAKE_CASE = timm_model.forward_features(_UpperCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_UpperCamelCase , outputs.pooler_output , atol=1e-3 )
else:
SCREAMING_SNAKE_CASE = timm_model(_UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_UpperCamelCase , outputs.logits , atol=1e-3 )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
a_ : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 673 |
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run, first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have some network issues; you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(_UpperCamelCase , 'r' ) as fh:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX )
try:
print(*_UpperCamelCase )
finally:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN )
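# Note on the helper above: many ranks writing to one terminal can interleave
# output mid-line; holding an exclusive fcntl.flock() around print() keeps each
# rank's message atomic, e.g. printflock(f"{gpu} is OK") stays intact even with
# several processes per node.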
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a_ : Dict = dist.get_rank()
a_ : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 673 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ : int = False
class UpperCamelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe.dual_guided(
prompt='first prompt' , image=snake_case__ , text_to_image_strength=0.75 , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = VersatileDiffusionPipeline.from_pretrained(snake_case__ , torch_dtype=torch.floataa )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe.dual_guided(
prompt='first prompt' , image=snake_case__ , text_to_image_strength=0.75 , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'cyberpunk 2077'
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe.dual_guided(
prompt=snake_case__ , image=snake_case__ , text_to_image_strength=0.75 , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
SCREAMING_SNAKE_CASE = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
SCREAMING_SNAKE_CASE = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger '
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe.text_to_image(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images
SCREAMING_SNAKE_CASE = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
SCREAMING_SNAKE_CASE = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE = pipe.image_variation(snake_case__ , generator=snake_case__ , output_type='numpy' ).images
SCREAMING_SNAKE_CASE = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
SCREAMING_SNAKE_CASE = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 673 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
a_ : Union[str, Any] = {"allegro/herbert-base-cased": 514}
a_ : List[Any] = {}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =HerbertTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : List[str]="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
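# A small illustration of the special-token layout produced above, following
# HerBERT's BERT-style convention: single sequences become [CLS] A [SEP], pairs
# become [CLS] A [SEP] B [SEP], and token_type_ids are 0 for the first segment
# and 1 for the second. The token IDs below are illustrative.
cls_id, sep_id = 0, 2
tokens_a, tokens_b = [11, 12], [21]
pair = [cls_id] + tokens_a + [sep_id] + tokens_b + [sep_id]
type_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
assert pair == [0, 11, 12, 2, 21, 2] and type_ids == [0, 0, 0, 0, 1, 1]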
| 673 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a_ : str = logging.get_logger(__name__)
a_ : Tuple = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="gptj"
__UpperCamelCase ={
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[Any] , snake_case__ : List[Any]=5_0_4_0_0 , snake_case__ : List[Any]=2_0_4_8 , snake_case__ : str=4_0_9_6 , snake_case__ : Dict=2_8 , snake_case__ : int=1_6 , snake_case__ : Tuple=6_4 , snake_case__ : Union[str, Any]=None , snake_case__ : Any="gelu_new" , snake_case__ : List[Any]=0.0 , snake_case__ : str=0.0 , snake_case__ : Tuple=0.0 , snake_case__ : Any=1E-5 , snake_case__ : Tuple=0.02 , snake_case__ : List[str]=True , snake_case__ : Tuple=5_0_2_5_6 , snake_case__ : Union[str, Any]=5_0_2_5_6 , snake_case__ : Optional[Any]=False , **snake_case__ : Optional[int] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = n_positions
SCREAMING_SNAKE_CASE = n_embd
SCREAMING_SNAKE_CASE = n_layer
SCREAMING_SNAKE_CASE = n_head
SCREAMING_SNAKE_CASE = n_inner
SCREAMING_SNAKE_CASE = rotary_dim
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = resid_pdrop
SCREAMING_SNAKE_CASE = embd_pdrop
SCREAMING_SNAKE_CASE = attn_pdrop
SCREAMING_SNAKE_CASE = layer_norm_epsilon
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
super().__init__(
bos_token_id=snake_case__ , eos_token_id=snake_case__ , tie_word_embeddings=snake_case__ , **snake_case__ )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Optional[Any] , snake_case__ : PretrainedConfig , snake_case__ : str = "default" , snake_case__ : List[PatchingSpec] = None , snake_case__ : bool = False , ):
"""simple docstring"""
super().__init__(snake_case__ , task=snake_case__ , patching_specs=snake_case__ , use_past=snake_case__ )
if not getattr(self._config , 'pad_token_id' , snake_case__ ):
# TODO: how to do that better?
SCREAMING_SNAKE_CASE = 0
@property
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(snake_case__ , direction='inputs' )
SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'past_sequence + sequence'}
else:
SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return self._config.n_layer
@property
def UpperCamelCase ( self : int ):
"""simple docstring"""
return self._config.n_head
def UpperCamelCase ( self : Optional[Any] , snake_case__ : PreTrainedTokenizer , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : bool = False , snake_case__ : Optional[TensorType] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super(snake_case__ , self ).generate_dummy_inputs(
snake_case__ , batch_size=snake_case__ , seq_length=snake_case__ , is_pair=snake_case__ , framework=snake_case__ )
        # We need to order the inputs in the way they appear in forward()
SCREAMING_SNAKE_CASE = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE = seqlen + 2
SCREAMING_SNAKE_CASE = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
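            # One zero-filled (key, value) tensor pair per layer, using the shape above.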
SCREAMING_SNAKE_CASE = [
(torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE = common_inputs['attention_mask']
if self.use_past:
SCREAMING_SNAKE_CASE = ordered_inputs['attention_mask'].dtype
SCREAMING_SNAKE_CASE = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(snake_case__ , snake_case__ , dtype=snake_case__ )] , dim=1 )
return ordered_inputs
@property
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return 1_3
| 673 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
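    # Iterative digit sum, e.g. 1234 -> 1 + 2 + 3 + 4 = 10.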
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
while n > 0:
res += n % 10
n //= 10
return res
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
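    # Recursive variant: the last digit plus the digit sum of the remaining prefix.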
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
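    # Compact variant: sum the integer value of each character of the decimal string.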
return sum(int(_UpperCamelCase ) for c in str(abs(_UpperCamelCase ) ) )
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCamelCase : Callable , _UpperCamelCase : int ) -> None:
SCREAMING_SNAKE_CASE = f"""{func.__name__}({value})"""
SCREAMING_SNAKE_CASE = timeit(f"""__main__.{call}""" , setup='import __main__' )
print(f"""{call:56} = {func(_UpperCamelCase )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_UpperCamelCase , _UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 673 | 1 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger can be faster but uses more memory.
a_ : Dict = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
a_ : int = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
a_ : Dict = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> tuple[str, float]:
'''simple docstring'''
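    # Fitness is the number of positions at which the candidate matches the target.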
SCREAMING_SNAKE_CASE = len([g for position, g in enumerate(_UpperCamelCase ) if g == main_target[position]] )
return (item, float(_UpperCamelCase ))
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> tuple[str, str]:
'''simple docstring'''
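    # Single-point crossover: cut both parents at a random index and swap the tails.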
SCREAMING_SNAKE_CASE = random.randint(0 , len(_UpperCamelCase ) - 1 )
SCREAMING_SNAKE_CASE = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : list[str] ) -> str:
'''simple docstring'''
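    # With probability MUTATION_PROBABILITY, one gene of the child is replaced by a random gene.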
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE = random.choice(_UpperCamelCase )
return "".join(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : tuple[str, float] , _UpperCamelCase : list[tuple[str, float]] , _UpperCamelCase : list[str] , ) -> list[str]:
'''simple docstring'''
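    # Fitter parents spawn more children (capped at 10); each pairing is crossed over and mutated.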
SCREAMING_SNAKE_CASE = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE = int(parent_a[1] * 1_00 ) + 1
SCREAMING_SNAKE_CASE = 10 if child_n >= 10 else child_n
for _ in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = population_score[random.randint(0 , _UpperCamelCase )][0]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = crossover(parent_a[0] , _UpperCamelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCamelCase , _UpperCamelCase ) )
pop.append(mutate(_UpperCamelCase , _UpperCamelCase ) )
return pop
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : list[str] , _UpperCamelCase : bool = True ) -> tuple[int, int, str]:
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(_UpperCamelCase )
    # Verify that the target contains no genes besides the ones in the genes list.
SCREAMING_SNAKE_CASE = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(_UpperCamelCase )
# Generate random starting population.
SCREAMING_SNAKE_CASE = []
for _ in range(_UpperCamelCase ):
population.append(''.join([random.choice(_UpperCamelCase ) for i in range(len(_UpperCamelCase ) )] ) )
    # Just some logs to show what the algorithm is doing.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCamelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE = [evaluate(_UpperCamelCase , _UpperCamelCase ) for item in population]
# Check if there is a matching evolution.
SCREAMING_SNAKE_CASE = sorted(_UpperCamelCase , key=lambda _UpperCamelCase : x[1] , reverse=_UpperCamelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"""\nGeneration: {generation}"""
f"""\nTotal Population:{total_population}"""
f"""\nBest score: {population_score[0][1]}"""
f"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
SCREAMING_SNAKE_CASE = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCamelCase )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE = [
(item, score / len(_UpperCamelCase )) for item, score in population_score
]
# This is selection
for i in range(_UpperCamelCase ):
population.extend(select(population_score[int(_UpperCamelCase )] , _UpperCamelCase , _UpperCamelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(_UpperCamelCase ) > N_POPULATION:
break
if __name__ == "__main__":
a_ : Union[str, Any] = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
a_ : Optional[int] = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
a_ , a_ , a_ : List[str] = basic(target_str, genes_list)
print(
F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 673 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 673 | 1 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
a_ : Optional[int] = True
except ImportError:
a_ : Optional[int] = False
try:
from torch.hub import _get_torch_home
a_ : Dict = _get_torch_home()
except ImportError:
a_ : str = os.path.expanduser(
os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
)
a_ : str = os.path.join(torch_cache_home, "transformers")
a_ : int = "https://cdn.huggingface.co"
a_ : Optional[int] = "https://s3.amazonaws.com/models.huggingface.co/bert"
a_ : Union[str, Any] = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
a_ : Tuple = os.path.join(PATH, "config.yaml")
a_ : Tuple = os.path.join(PATH, "attributes.txt")
a_ : List[Any] = os.path.join(PATH, "objects.txt")
a_ : List[Any] = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
a_ : int = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
a_ : Optional[int] = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
a_ : List[Any] = "pytorch_model.bin"
a_ : List[str] = "config.yaml"
def __lowerCAmelCase ( _UpperCamelCase : Tuple=OBJECTS , _UpperCamelCase : Any=ATTRIBUTES ) -> str:
'''simple docstring'''
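    # Each vocabulary file has one entry per line; the name is the first comma-separated field.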
SCREAMING_SNAKE_CASE = []
with open(_UpperCamelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(',' )[0].lower().strip() )
SCREAMING_SNAKE_CASE = []
with open(_UpperCamelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(',' )[0].lower().strip() )
return vg_classes, vg_attrs
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
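    # Load a pickled checkpoint and convert every numpy array in its 'model' dict to a torch tensor.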
SCREAMING_SNAKE_CASE = OrderedDict()
with open(_UpperCamelCase , 'rb' ) as f:
SCREAMING_SNAKE_CASE = pkl.load(_UpperCamelCase )['model']
for k in copy.deepcopy(list(ckp.keys() ) ):
SCREAMING_SNAKE_CASE = ckp.pop(_UpperCamelCase )
if isinstance(_UpperCamelCase , np.ndarray ):
SCREAMING_SNAKE_CASE = torch.tensor(_UpperCamelCase )
else:
            assert isinstance(_UpperCamelCase , torch.Tensor ), type(_UpperCamelCase )
SCREAMING_SNAKE_CASE = v
return r
class UpperCamelCase :
__UpperCamelCase ={}
def __init__( self : Tuple , snake_case__ : dict , snake_case__ : str = "root" , snake_case__ : Dict=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = name
SCREAMING_SNAKE_CASE = level
SCREAMING_SNAKE_CASE = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
SCREAMING_SNAKE_CASE = copy.deepcopy(snake_case__ )
SCREAMING_SNAKE_CASE = copy.deepcopy(snake_case__ )
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = Config(snake_case__ , name=snake_case__ , level=level + 1 )
SCREAMING_SNAKE_CASE = v
setattr(self , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = d
def __repr__( self : Optional[Any] ):
"""simple docstring"""
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = val
SCREAMING_SNAKE_CASE = val
SCREAMING_SNAKE_CASE = key.split('.' )
SCREAMING_SNAKE_CASE = len(snake_case__ ) - 1
SCREAMING_SNAKE_CASE = self._pointer
if len(snake_case__ ) > 1:
for i, l in enumerate(snake_case__ ):
if hasattr(self , snake_case__ ) and isinstance(getattr(self , snake_case__ ) , snake_case__ ):
setattr(getattr(self , snake_case__ ) , '.'.join(levels[i:] ) , snake_case__ )
if l == last_level:
SCREAMING_SNAKE_CASE = val
else:
SCREAMING_SNAKE_CASE = pointer[l]
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return self._pointer
def UpperCamelCase ( self : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[str] ):
"""simple docstring"""
with open(F"""{file_name}""" , 'w' ) as stream:
dump(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Tuple , snake_case__ : Tuple , snake_case__ : str ):
"""simple docstring"""
with open(F"""{file_name}""" , 'w' ) as stream:
json.dump(snake_case__ , snake_case__ )
@staticmethod
def UpperCamelCase ( snake_case__ : Union[str, Any] ):
"""simple docstring"""
with open(snake_case__ ) as stream:
SCREAMING_SNAKE_CASE = load(snake_case__ , Loader=snake_case__ )
return data
def __str__( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ' '
if self._name != "root":
SCREAMING_SNAKE_CASE = F"""{t * (self._level-1)}{self._name}:\n"""
else:
SCREAMING_SNAKE_CASE = ''
SCREAMING_SNAKE_CASE = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(snake_case__ , snake_case__ ):
r += F"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += F"""{t * (self._level)}{k}: {v} ({type(snake_case__ ).__name__})\n"""
SCREAMING_SNAKE_CASE = level
return r[:-1]
@classmethod
def UpperCamelCase ( cls : int , snake_case__ : str , **snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cls.get_config_dict(snake_case__ , **snake_case__ )
return cls(snake_case__ )
@classmethod
def UpperCamelCase ( cls : int , snake_case__ : str , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.pop('cache_dir' , snake_case__ )
SCREAMING_SNAKE_CASE = kwargs.pop('force_download' , snake_case__ )
SCREAMING_SNAKE_CASE = kwargs.pop('resume_download' , snake_case__ )
SCREAMING_SNAKE_CASE = kwargs.pop('proxies' , snake_case__ )
SCREAMING_SNAKE_CASE = kwargs.pop('local_files_only' , snake_case__ )
if os.path.isdir(snake_case__ ):
SCREAMING_SNAKE_CASE = os.path.join(snake_case__ , snake_case__ )
elif os.path.isfile(snake_case__ ) or is_remote_url(snake_case__ ):
SCREAMING_SNAKE_CASE = pretrained_model_name_or_path
else:
SCREAMING_SNAKE_CASE = hf_bucket_url(snake_case__ , filename=snake_case__ , use_cdn=snake_case__ )
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE = cached_path(
snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , proxies=snake_case__ , resume_download=snake_case__ , local_files_only=snake_case__ , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
SCREAMING_SNAKE_CASE = Config.load_yaml(snake_case__ )
except EnvironmentError:
SCREAMING_SNAKE_CASE = 'Can\'t load config for'
raise EnvironmentError(snake_case__ )
if resolved_config_file == config_file:
print('loading configuration file from path' )
else:
            print('loading configuration file from cache' )
return Config.load_yaml(snake_case__ ), kwargs
def __lowerCAmelCase ( _UpperCamelCase : Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.load('dump.pt' , map_location=in_tensor.device )
SCREAMING_SNAKE_CASE = in_tensor.numpy()
SCREAMING_SNAKE_CASE = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(_UpperCamelCase , _UpperCamelCase , rtol=0.01 , atol=0.1 ), (
f"""{sum([1 for x in np.isclose(_UpperCamelCase , _UpperCamelCase , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_00:.4f} %"""
" element-wise mismatch"
)
raise Exception('tensors are all good' )
# Hugging face functions below
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = urlparse(_UpperCamelCase )
return parsed.scheme in ("http", "https")
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : List[Any]=True ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
SCREAMING_SNAKE_CASE = '/' not in model_id
if legacy_format:
return f"""{endpoint}/{model_id}-{filename}"""
else:
return f"""{endpoint}/{model_id}/{filename}"""
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Dict , _UpperCamelCase : int=None , _UpperCamelCase : List[Any]=0 , _UpperCamelCase : Tuple=None , ) -> int:
'''simple docstring'''
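    # Stream the download in 1 KiB chunks behind a tqdm bar; a Range header resumes partial files.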
SCREAMING_SNAKE_CASE = 'python/{}'.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
ua += "; " + "; ".join('{}/{}'.format(_UpperCamelCase , _UpperCamelCase ) for k, v in user_agent.items() )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
ua += "; " + user_agent
SCREAMING_SNAKE_CASE = {'user-agent': ua}
if resume_size > 0:
SCREAMING_SNAKE_CASE = 'bytes=%d-' % (resume_size,)
SCREAMING_SNAKE_CASE = requests.get(_UpperCamelCase , stream=_UpperCamelCase , proxies=_UpperCamelCase , headers=_UpperCamelCase )
if response.status_code == 4_16: # Range not satisfiable
return
SCREAMING_SNAKE_CASE = response.headers.get('Content-Length' )
SCREAMING_SNAKE_CASE = resume_size + int(_UpperCamelCase ) if content_length is not None else None
SCREAMING_SNAKE_CASE = tqdm(
unit='B' , unit_scale=_UpperCamelCase , total=_UpperCamelCase , initial=_UpperCamelCase , desc='Downloading' , )
for chunk in response.iter_content(chunk_size=10_24 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(_UpperCamelCase ) )
temp_file.write(_UpperCamelCase )
progress.close()
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Dict=False , _UpperCamelCase : int=None , _UpperCamelCase : Union[str, Any]=10 , _UpperCamelCase : Union[str, Any]=False , _UpperCamelCase : Tuple=None , _UpperCamelCase : List[str]=False , ) -> Optional[int]:
'''simple docstring'''
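    # Resolve a URL to a local cache path, downloading under a file lock; falls back to cached files offline.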
if cache_dir is None:
SCREAMING_SNAKE_CASE = TRANSFORMERS_CACHE
if isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = str(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
SCREAMING_SNAKE_CASE = None
if not local_files_only:
try:
SCREAMING_SNAKE_CASE = requests.head(_UpperCamelCase , allow_redirects=_UpperCamelCase , proxies=_UpperCamelCase , timeout=_UpperCamelCase )
if response.status_code == 2_00:
SCREAMING_SNAKE_CASE = response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
SCREAMING_SNAKE_CASE = url_to_filename(_UpperCamelCase , _UpperCamelCase )
# get cache path to put the file
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
    # etag is None means we have no connection, or the url doesn't exist, or it is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(_UpperCamelCase ):
return cache_path
else:
SCREAMING_SNAKE_CASE = [
file
for file in fnmatch.filter(os.listdir(_UpperCamelCase ) , filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(_UpperCamelCase ) > 0:
return os.path.join(_UpperCamelCase , matching_files[-1] )
else:
                # If the files cannot be found and local_files_only=True,
                # they might have been found with local_files_only=False.
                # Notify the user about that.
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(_UpperCamelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
SCREAMING_SNAKE_CASE = cache_path + '.lock'
with FileLock(_UpperCamelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(_UpperCamelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
SCREAMING_SNAKE_CASE = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(_UpperCamelCase , 'a+b' ) as f:
yield f
SCREAMING_SNAKE_CASE = _resumable_file_manager
if os.path.exists(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = os.stat(_UpperCamelCase ).st_size
else:
SCREAMING_SNAKE_CASE = 0
else:
SCREAMING_SNAKE_CASE = partial(tempfile.NamedTemporaryFile , dir=_UpperCamelCase , delete=_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
            print(
                '%s not found in cache or force_download set to True, downloading to %s' % (_UpperCamelCase , temp_file.name) )
http_get(
_UpperCamelCase , _UpperCamelCase , proxies=_UpperCamelCase , resume_size=_UpperCamelCase , user_agent=_UpperCamelCase , )
os.replace(temp_file.name , _UpperCamelCase )
SCREAMING_SNAKE_CASE = {'url': url, 'etag': etag}
SCREAMING_SNAKE_CASE = cache_path + '.json'
with open(_UpperCamelCase , 'w' ) as meta_file:
json.dump(_UpperCamelCase , _UpperCamelCase )
return cache_path
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str]=None ) -> Tuple:
'''simple docstring'''
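    # The cache filename is the hash of the URL, optionally suffixed with the hash of the etag.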
SCREAMING_SNAKE_CASE = url.encode('utf-8' )
SCREAMING_SNAKE_CASE = shaaaa(_UpperCamelCase )
SCREAMING_SNAKE_CASE = url_hash.hexdigest()
if etag:
SCREAMING_SNAKE_CASE = etag.encode('utf-8' )
SCREAMING_SNAKE_CASE = shaaaa(_UpperCamelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Dict=None , _UpperCamelCase : Dict=False , _UpperCamelCase : Tuple=None , _UpperCamelCase : str=False , _UpperCamelCase : int=None , _UpperCamelCase : Optional[int]=False , _UpperCamelCase : str=False , _UpperCamelCase : str=False , ) -> List[Any]:
'''simple docstring'''
if cache_dir is None:
SCREAMING_SNAKE_CASE = TRANSFORMERS_CACHE
if isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = str(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = str(_UpperCamelCase )
if is_remote_url(_UpperCamelCase ):
# URL, so get it from the cache (downloading if necessary)
SCREAMING_SNAKE_CASE = get_from_cache(
_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , proxies=_UpperCamelCase , resume_download=_UpperCamelCase , user_agent=_UpperCamelCase , local_files_only=_UpperCamelCase , )
elif os.path.exists(_UpperCamelCase ):
# File, and it exists.
SCREAMING_SNAKE_CASE = url_or_filename
elif urlparse(_UpperCamelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(_UpperCamelCase ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(_UpperCamelCase ) )
if extract_compressed_file:
if not is_zipfile(_UpperCamelCase ) and not tarfile.is_tarfile(_UpperCamelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = os.path.split(_UpperCamelCase )
SCREAMING_SNAKE_CASE = output_file.replace('.' , '-' ) + '-extracted'
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.isdir(_UpperCamelCase ) and os.listdir(_UpperCamelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
SCREAMING_SNAKE_CASE = output_path + '.lock'
with FileLock(_UpperCamelCase ):
shutil.rmtree(_UpperCamelCase , ignore_errors=_UpperCamelCase )
os.makedirs(_UpperCamelCase )
if is_zipfile(_UpperCamelCase ):
with ZipFile(_UpperCamelCase , 'r' ) as zip_file:
zip_file.extractall(_UpperCamelCase )
zip_file.close()
elif tarfile.is_tarfile(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = tarfile.open(_UpperCamelCase )
tar_file.extractall(_UpperCamelCase )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(_UpperCamelCase ) )
return output_path_extracted
return output_path
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Dict="," ) -> Dict:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase )
if os.path.isfile(_UpperCamelCase ):
with open(_UpperCamelCase ) as f:
SCREAMING_SNAKE_CASE = eval(f.read() )
else:
SCREAMING_SNAKE_CASE = requests.get(_UpperCamelCase )
try:
            SCREAMING_SNAKE_CASE = req.json()
except Exception:
SCREAMING_SNAKE_CASE = req.content.decode()
assert data is not None, "could not connect"
try:
SCREAMING_SNAKE_CASE = eval(_UpperCamelCase )
except Exception:
SCREAMING_SNAKE_CASE = data.split('\n' )
req.close()
return data
def __lowerCAmelCase ( _UpperCamelCase : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = requests.get(_UpperCamelCase )
SCREAMING_SNAKE_CASE = np.array(Image.open(BytesIO(response.content ) ) )
return img
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Dict:
'''simple docstring'''
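    # Tensorize the pickled weights and add a zero num_batches_tracked buffer next to every running_var.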
SCREAMING_SNAKE_CASE = url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(_UpperCamelCase )
with open(_UpperCamelCase , 'rb' ) as stream:
SCREAMING_SNAKE_CASE = pkl.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = weights.pop('model' )
SCREAMING_SNAKE_CASE = {}
for k, v in model.items():
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase )
if "running_var" in k:
SCREAMING_SNAKE_CASE = torch.tensor([0] )
SCREAMING_SNAKE_CASE = k.replace('running_var' , 'num_batches_tracked' )
SCREAMING_SNAKE_CASE = zero
return new
def __lowerCAmelCase ( ) -> List[str]:
'''simple docstring'''
print(f"""{os.path.abspath(os.path.join(_UpperCamelCase , os.pardir ) )}/demo.ipynb""" )
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int]="RGB" ) -> Tuple:
'''simple docstring'''
assert isinstance(_UpperCamelCase , _UpperCamelCase )
if os.path.isfile(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = cva.imread(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = get_image_from_url(_UpperCamelCase )
assert img is not None, f"""could not connect to: {im}"""
SCREAMING_SNAKE_CASE = cva.cvtColor(_UpperCamelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
SCREAMING_SNAKE_CASE = img[:, :, ::-1]
return img
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : List[str]=1 ) -> int:
'''simple docstring'''
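    # Yield successive fixed-size batches from the image list.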
return (images[i : i + batch] for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase ))
| 673 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =AudioLDMPipeline
__UpperCamelCase =TEXT_TO_AUDIO_PARAMS
__UpperCamelCase =TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=snake_case__ , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
SCREAMING_SNAKE_CASE = ClapTextModelWithProjection(snake_case__ )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 )
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case__ , )
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : int , snake_case__ : int=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
embeds.append(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'egg cracking'
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ , negative_prompt=snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config.sampling_rate
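        # The generated waveform length divided by the vocoder sampling rate gives the duration in seconds.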
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.016 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.032 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.032
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = ['hey']
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
assert audio_shape == (1, 2_5_6)
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case__ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ )
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Tuple="cpu" , snake_case__ : List[str]=torch.floataa , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = np.random.RandomState(snake_case__ ).standard_normal((1, 8, 1_2_8, 1_6) )
SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 2_5
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[7_7_2_3_0:7_7_2_4_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[2_7_7_8_0:2_7_7_9_0]
SCREAMING_SNAKE_CASE = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 673 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a_ : List[str] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class UpperCamelCase :
__UpperCamelCase =field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "The column name of the images in the files."} )
__UpperCamelCase =field(default=SCREAMING_SNAKE_CASE , metadata={"help": "A folder containing the training data."} )
__UpperCamelCase =field(default=SCREAMING_SNAKE_CASE , metadata={"help": "A folder containing the validation data."} )
__UpperCamelCase =field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
if self.train_dir is not None:
SCREAMING_SNAKE_CASE = self.train_dir
if self.validation_dir is not None:
SCREAMING_SNAKE_CASE = self.validation_dir
SCREAMING_SNAKE_CASE = data_files if data_files else None
@dataclass
class UpperCamelCase :
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
__UpperCamelCase =field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCamelCase =field(default=SCREAMING_SNAKE_CASE , metadata={"help": "Name or path of preprocessor config."} )
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
__UpperCamelCase =field(
default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =field(
default=1e-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def __lowerCAmelCase ( _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
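    # Stack the per-example pixel tensors into one batch; MAE pretraining needs no labels.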
SCREAMING_SNAKE_CASE = torch.stack([example['pixel_values'] for example in examples] )
return {"pixel_values": pixel_values}
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
SCREAMING_SNAKE_CASE = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _UpperCamelCase ) and data_args.train_val_split > 0.0:
SCREAMING_SNAKE_CASE = ds['train'].train_test_split(data_args.train_val_split )
SCREAMING_SNAKE_CASE = split['train']
SCREAMING_SNAKE_CASE = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
SCREAMING_SNAKE_CASE = ViTMAEConfig.from_pretrained(model_args.config_name , **_UpperCamelCase )
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_UpperCamelCase )
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
SCREAMING_SNAKE_CASE = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
SCREAMING_SNAKE_CASE = ViTMAEForPreTraining(_UpperCamelCase )
if training_args.do_train:
SCREAMING_SNAKE_CASE = ds['train'].column_names
else:
SCREAMING_SNAKE_CASE = ds['validation'].column_names
if data_args.image_column_name is not None:
SCREAMING_SNAKE_CASE = data_args.image_column_name
elif "image" in column_names:
SCREAMING_SNAKE_CASE = 'image'
elif "img" in column_names:
SCREAMING_SNAKE_CASE = 'img'
else:
SCREAMING_SNAKE_CASE = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
SCREAMING_SNAKE_CASE = image_processor.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE = (image_processor.size['height'], image_processor.size['width'])
SCREAMING_SNAKE_CASE = Compose(
[
Lambda(lambda _UpperCamelCase : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_UpperCamelCase : str ):
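        # Apply the augmentation pipeline to every image in the batch.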
SCREAMING_SNAKE_CASE = [transforms(_UpperCamelCase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_UpperCamelCase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_UpperCamelCase )
# Compute absolute learning rate
SCREAMING_SNAKE_CASE = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
SCREAMING_SNAKE_CASE = training_args.base_learning_rate * total_train_batch_size / 2_56
# Initialize our trainer
SCREAMING_SNAKE_CASE = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_UpperCamelCase , data_collator=_UpperCamelCase , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE = last_checkpoint
SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE = trainer.evaluate()
trainer.log_metrics('eval' , _UpperCamelCase )
trainer.save_metrics('eval' , _UpperCamelCase )
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE = {
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> Dict:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 673 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase :
def __init__( self : Dict , snake_case__ : str , snake_case__ : str=1_3 , snake_case__ : Tuple=7 , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : List[str]=False , snake_case__ : Any=True , snake_case__ : Union[str, Any]=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Optional[Any]=5 , snake_case__ : Optional[Any]=4 , snake_case__ : Union[str, Any]=3_7 , snake_case__ : Tuple="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : int=4 , snake_case__ : List[str]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the attention mask
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCamelCase =(OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase =(
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 1_0] , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
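# Background sketch (not part of this test file): "linear" RoPE scaling applies
# position interpolation, dividing position indices by the factor before computing
# the rotary angles, e.g.
#
# scaled_position = position / factor # 100 / 10.0 -> 10.0
#
# whereas "dynamic" (NTK-style) scaling only rescales the rotary base once the input
# grows past max_position_embeddings, which is why the short input matches above.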
| 673 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def __lowerCAmelCase ( _UpperCamelCase : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
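# Worked example (sketch) for the shape check above, later used as check_same_shape:
#
# >>> check_same_shape([torch.zeros(1, 3), torch.ones(1, 3)])
# True
# >>> check_same_shape([torch.zeros(1, 3), torch.ones(2, 3)])
# False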
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =StableDiffusionLatentUpscalePipeline
__UpperCamelCase =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
__UpperCamelCase =PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
__UpperCamelCase =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCamelCase =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCamelCase =frozenset([] )
__UpperCamelCase =True
@property
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = (1_6, 1_6)
SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case__ )
return image
def UpperCamelCase ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
act_fn='gelu' , attention_head_dim=8 , norm_num_groups=snake_case__ , block_out_channels=[3_2, 3_2, 6_4, 6_4] , time_cond_proj_dim=1_6_0 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=3_2 , down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
) , in_channels=8 , mid_block_type=snake_case__ , only_cross_attention=snake_case__ , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4, 6_4] , in_channels=3 , out_channels=3 , down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
SCREAMING_SNAKE_CASE = EulerDiscreteScheduler(prediction_type='sample' )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='quick_gelu' , projection_dim=5_1_2 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(snake_case__ )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def UpperCamelCase ( self : List[str] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_5_6, 2_5_6, 3) )
SCREAMING_SNAKE_CASE = np.array(
[0.47_222_412, 0.41_921_633, 0.44_717_434, 0.46_874_192, 0.42_588_258, 0.46_150_726, 0.4_677_534, 0.45_583_832, 0.48_579_055] )
SCREAMING_SNAKE_CASE = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case__ , 1E-3 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**snake_case__ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# skip schedulers that this pipeline does not support (listed in skip_schedulers above)
continue
SCREAMING_SNAKE_CASE = getattr(snake_case__ , scheduler_enum.name )
SCREAMING_SNAKE_CASE = scheduler_cls.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE = pipe(**snake_case__ )[0]
outputs.append(snake_case__ )
assert check_same_shape(snake_case__ )
@require_torch_gpu
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.manual_seed(3_3 )
SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.floataa )
pipe.to('cuda' )
SCREAMING_SNAKE_CASE = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa )
upscaler.to('cuda' )
SCREAMING_SNAKE_CASE = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
SCREAMING_SNAKE_CASE = pipe(snake_case__ , generator=snake_case__ , output_type='latent' ).images
SCREAMING_SNAKE_CASE = upscaler(
prompt=snake_case__ , image=snake_case__ , num_inference_steps=2_0 , guidance_scale=0 , generator=snake_case__ , output_type='np' , ).images[0]
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
assert np.abs((expected_image - image).mean() ) < 5E-2
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.manual_seed(3_3 )
SCREAMING_SNAKE_CASE = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.floataa )
upscaler.to('cuda' )
SCREAMING_SNAKE_CASE = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
SCREAMING_SNAKE_CASE = upscaler(
prompt=snake_case__ , image=snake_case__ , num_inference_steps=2_0 , guidance_scale=0 , generator=snake_case__ , output_type='np' , ).images[0]
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
assert np.abs((expected_image - image).max() ) < 5E-2
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="openai/whisper-base"
__UpperCamelCase =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCamelCase ="transcriber"
__UpperCamelCase =WhisperProcessor
__UpperCamelCase =WhisperForConditionalGeneration
__UpperCamelCase =["audio"]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Dict , snake_case__ : Tuple ):
"""simple docstring"""
return self.pre_processor(snake_case__ , return_tensors='pt' ).input_features
def UpperCamelCase ( self : Optional[int] , snake_case__ : Tuple ):
"""simple docstring"""
return self.model.generate(inputs=snake_case__ )
def UpperCamelCase ( self : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
return self.pre_processor.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )[0]
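# Usage sketch (assumes the transformers agents `load_tool` entry point; the audio
# path below is a placeholder, not shipped with this module):
#
# from transformers import load_tool
# transcriber = load_tool("transcriber")
# text = transcriber("path/to/audio.flac")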
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : int = 2_00_00_00 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [0 for i in range(n + 1 )]
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 0
for i in range(_UpperCamelCase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
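# Self-contained sanity sketch (assumption: the function sums primes strictly below n).
# `check_prime_sum` is illustrative only and mirrors the sieve above:
def check_prime_sum(n: int) -> int:
    sieve = [True] * n  # sieve of Eratosthenes over [0, n)
    sieve[0] = sieve[1] = False
    for i in range(2, int(n**0.5) + 1):
        if sieve[i]:
            for j in range(i * i, n, i):
                sieve[j] = False
    return sum(i for i in range(n) if sieve[i])
# >>> check_prime_sum(10)
# 17  # 2 + 3 + 5 + 7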
if __name__ == "__main__":
print(F"""{solution() = }""")
| 673 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
a_ : List[str] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
a_ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
a_ : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
a_ : int = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def UpperCamelCase ( self : Dict , snake_case__ : int ):
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[Any]=0.9 , snake_case__ : Optional[Any]=3 , snake_case__ : Any=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(
word_tokenize(snake_case__ ) , word_tokenize(snake_case__ ) , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
else:
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(snake_case__ , snake_case__ , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
return {"meteor": np.mean(snake_case__ )}
| 673 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ : Optional[int] = logging.get_logger(__name__)
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = UniSpeechSatForSequenceClassification.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = downstream_dict['projector.weight']
SCREAMING_SNAKE_CASE = downstream_dict['projector.bias']
SCREAMING_SNAKE_CASE = downstream_dict['model.post_net.linear.weight']
SCREAMING_SNAKE_CASE = downstream_dict['model.post_net.linear.bias']
return model
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = UniSpeechSatForAudioFrameClassification.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = downstream_dict['model.linear.weight']
SCREAMING_SNAKE_CASE = downstream_dict['model.linear.bias']
return model
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = UniSpeechSatForXVector.from_pretrained(_UpperCamelCase , config=_UpperCamelCase )
SCREAMING_SNAKE_CASE = downstream_dict['connector.weight']
SCREAMING_SNAKE_CASE = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
SCREAMING_SNAKE_CASE = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
SCREAMING_SNAKE_CASE = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
SCREAMING_SNAKE_CASE = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
SCREAMING_SNAKE_CASE = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
SCREAMING_SNAKE_CASE = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
SCREAMING_SNAKE_CASE = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
SCREAMING_SNAKE_CASE = downstream_dict['objective.W']
return model
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : str , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase , map_location='cpu' )
SCREAMING_SNAKE_CASE = checkpoint['Downstream']
SCREAMING_SNAKE_CASE = UniSpeechSatConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(
_UpperCamelCase , return_attention_mask=_UpperCamelCase , do_normalize=_UpperCamelCase )
SCREAMING_SNAKE_CASE = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
SCREAMING_SNAKE_CASE = convert_classification(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
elif arch.endswith('ForAudioFrameClassification' ):
SCREAMING_SNAKE_CASE = convert_diarization(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
elif arch.endswith('ForXVector' ):
SCREAMING_SNAKE_CASE = convert_xvector(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
SCREAMING_SNAKE_CASE = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(_UpperCamelCase )
hf_model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
a_ : str = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
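# Example invocation (illustrative; the script name, model name, and paths below are
# placeholders, not mandated by this file):
#
# python convert_unispeech_sat_s3prl_checkpoint.py \
#     --base_model_name microsoft/unispeech-sat-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_downstream.ckpt \
#     --model_dump_path ./converted_model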
| 673 |
import numpy as np
def __lowerCAmelCase ( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def __lowerCAmelCase ( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(_UpperCamelCase )
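# Small worked example (sketch): sigmoid(0) = 0.5, so the swish/SiLU value at 0 is 0.
#
# >>> v = np.array([-1.0, 0.0, 1.0])
# >>> sigmoid(v)      # approx [0.2689, 0.5, 0.7311]
# >>> v * sigmoid(v)  # approx [-0.2689, 0.0, 0.7311]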
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 1 |
import numpy as np
def __lowerCAmelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : float = 1e-12 , _UpperCamelCase : int = 1_00 , ) -> tuple[float, np.ndarray]:
'''simple docstring'''
assert np.shape(_UpperCamelCase )[0] == np.shape(_UpperCamelCase )[1]
# Ensure proper dimensionality.
assert np.shape(_UpperCamelCase )[0] == np.shape(_UpperCamelCase )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(_UpperCamelCase ) == np.iscomplexobj(_UpperCamelCase )
SCREAMING_SNAKE_CASE = np.iscomplexobj(_UpperCamelCase )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(_UpperCamelCase , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1e12
while not convergence:
# Multiple matrix by the vector.
SCREAMING_SNAKE_CASE = np.dot(_UpperCamelCase , _UpperCamelCase )
# Normalize the resulting output vector.
SCREAMING_SNAKE_CASE = w / np.linalg.norm(_UpperCamelCase )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
SCREAMING_SNAKE_CASE = vector.conj().T if is_complex else vector.T
SCREAMING_SNAKE_CASE = np.dot(_UpperCamelCase , np.dot(_UpperCamelCase , _UpperCamelCase ) )
# Check convergence.
SCREAMING_SNAKE_CASE = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = lambda_
if is_complex:
SCREAMING_SNAKE_CASE = np.real(lambda_ )
return lambda_, vector
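# Minimal usage sketch (illustrative values; power_iteration is the routine above):
#
# >>> A = np.array([[2.0, 0.0], [0.0, 1.0]])
# >>> v0 = np.array([1.0, 1.0])
# >>> eigenvalue, eigenvector = power_iteration(A, v0)
# >>> round(eigenvalue, 6)  # dominant eigenvalue of A
# 2.0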
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
SCREAMING_SNAKE_CASE = np.array([41, 4, 20] )
SCREAMING_SNAKE_CASE = real_input_matrix.astype(np.complexaaa )
SCREAMING_SNAKE_CASE = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
SCREAMING_SNAKE_CASE = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
SCREAMING_SNAKE_CASE = real_input_matrix
SCREAMING_SNAKE_CASE = real_vector
elif problem_type == "complex":
SCREAMING_SNAKE_CASE = complex_input_matrix
SCREAMING_SNAKE_CASE = complex_vector
# Our implementation.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = power_iteration(_UpperCamelCase , _UpperCamelCase )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = np.linalg.eigh(_UpperCamelCase )
# Last eigenvalue is the maximum one.
SCREAMING_SNAKE_CASE = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
SCREAMING_SNAKE_CASE = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(_UpperCamelCase ) - np.abs(_UpperCamelCase ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 673 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Dict = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="van"
def __init__( self : Optional[Any] , snake_case__ : Tuple=2_2_4 , snake_case__ : Dict=3 , snake_case__ : Union[str, Any]=[7, 3, 3, 3] , snake_case__ : str=[4, 2, 2, 2] , snake_case__ : Optional[Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , snake_case__ : Optional[Any]=[3, 3, 1_2, 3] , snake_case__ : Tuple=[8, 8, 4, 4] , snake_case__ : Any="gelu" , snake_case__ : Dict=0.02 , snake_case__ : List[str]=1E-6 , snake_case__ : int=1E-2 , snake_case__ : Any=0.0 , snake_case__ : Tuple=0.0 , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = strides
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_ratios
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = dropout_rate
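# Usage sketch (this class is the `van` model configuration; the keyword values
# below are illustrative and simply restate the defaults above):
#
# config = UpperCamelCase(image_size=2_2_4, num_channels=3)  # a VAN config with defaults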
| 673 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a_ : Any = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[Any] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
a_ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 673 |
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : int ) -> list[str]:
'''simple docstring'''
return [sentence[i : i + ngram_size] for i in range(len(_UpperCamelCase ) - ngram_size + 1 )]
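# Worked example (sketch): character n-grams of size 2, written as the bare
# comprehension used by the function above.
#
# >>> ["abcd"[i : i + 2] for i in range(len("abcd") - 2 + 1)]
# ['ab', 'bc', 'cd']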
if __name__ == "__main__":
from doctest import testmod
testmod()
| 673 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class UpperCamelCase ( unittest.TestCase , SCREAMING_SNAKE_CASE ):
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_tool('text-classification' )
self.tool.setup()
SCREAMING_SNAKE_CASE = load_tool('text-classification' , remote=snake_case__ )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.tool('That\'s quite cool' , ['positive', 'negative'] )
self.assertEqual(snake_case__ , 'positive' )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.remote_tool('That\'s quite cool' , ['positive', 'negative'] )
self.assertEqual(snake_case__ , 'positive' )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
self.assertEqual(snake_case__ , 'positive' )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
self.assertEqual(snake_case__ , 'positive' )
| 673 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Any , **snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(snake_case__ )
def __call__( self : List[Any] , snake_case__ : Union[str, "Image.Image", List[Dict[str, Any]]] , snake_case__ : Union[str, List[str]] = None , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
if "text_queries" in kwargs:
SCREAMING_SNAKE_CASE = kwargs.pop('text_queries' )
if isinstance(snake_case__ , (str, Image.Image) ):
SCREAMING_SNAKE_CASE = {'image': image, 'candidate_labels': candidate_labels}
else:
SCREAMING_SNAKE_CASE = image
SCREAMING_SNAKE_CASE = super().__call__(snake_case__ , **snake_case__ )
return results
def UpperCamelCase ( self : Union[str, Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
if "threshold" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['threshold']
if "top_k" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['top_k']
return {}, {}, postprocess_params
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(inputs['image'] )
SCREAMING_SNAKE_CASE = inputs['candidate_labels']
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = candidate_labels.split(',' )
SCREAMING_SNAKE_CASE = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE = self.tokenizer(snake_case__ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE = self.image_processor(snake_case__ , return_tensors=self.framework )
yield {
"is_last": i == len(snake_case__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase ( self : Any , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = model_inputs.pop('target_size' )
SCREAMING_SNAKE_CASE = model_inputs.pop('candidate_label' )
SCREAMING_SNAKE_CASE = model_inputs.pop('is_last' )
SCREAMING_SNAKE_CASE = self.model(**snake_case__ )
SCREAMING_SNAKE_CASE = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : str=0.1 , snake_case__ : Union[str, Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
SCREAMING_SNAKE_CASE = model_output['candidate_label']
SCREAMING_SNAKE_CASE = BaseModelOutput(snake_case__ )
SCREAMING_SNAKE_CASE = self.image_processor.post_process_object_detection(
outputs=snake_case__ , threshold=snake_case__ , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
SCREAMING_SNAKE_CASE = outputs['scores'][index].item()
SCREAMING_SNAKE_CASE = self._get_bounding_box(outputs['boxes'][index][0] )
SCREAMING_SNAKE_CASE = {'score': score, 'label': label, 'box': box}
results.append(snake_case__ )
SCREAMING_SNAKE_CASE = sorted(snake_case__ , key=lambda x : x["score"] , reverse=snake_case__ )
if top_k:
SCREAMING_SNAKE_CASE = results[:top_k]
return results
def UpperCamelCase ( self : List[Any] , snake_case__ : "torch.Tensor" ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = box.int().tolist()
SCREAMING_SNAKE_CASE = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
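# Usage sketch (via the standard pipeline factory; the checkpoint below is one
# public zero-shot detector, used here only as an example):
#
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector("path/to/image.png", candidate_labels=["cat", "remote control"])
# -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]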
| 673 | 1 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a_ : str = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
a_ : int = 5
a_ : Union[str, Any] = 10
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =SpeechaTextTokenizer
__UpperCamelCase =False
__UpperCamelCase =True
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE = sp.SentencePieceProcessor()
spm_model.Load(snake_case__ )
SCREAMING_SNAKE_CASE = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(snake_case__ ) )]
SCREAMING_SNAKE_CASE = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE = Path(self.tmpdirname )
save_json(snake_case__ , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(snake_case__ , save_dir / VOCAB_FILES_NAMES['spm_file'] )
SCREAMING_SNAKE_CASE = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '<pad>'
SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(snake_case__ ) , 1_0_0_1 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_1 )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [2_8_9, 5_0, 1_4, 1_7_4, 3_8_6] , )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(snake_case__ , [1_2, 2_5, 8_8, 5_9, 2_8, 2_3, 1_1, 4, 6_0_6, 3_5_1, 3_5_1, 3_5_1, 7, 1_6, 7_0, 5_0, 7_6, 8_4, 1_0, 4, 8] )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {'input_ids': [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class UpperCamelCase ( unittest.TestCase ):
__UpperCamelCase ="valhalla/s2t_mustc_multilinguial_medium"
__UpperCamelCase ="C'est trop cool"
__UpperCamelCase ="Esto es genial"
@classmethod
def UpperCamelCase ( cls : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def UpperCamelCase ( self : str ):
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 1_1 )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 1_0_0_0_0 )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
self.assertIn(snake_case__ , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE = [ES_CODE, 4, 1_6_0_1, 4_7, 7_6_4_7, 2]
SCREAMING_SNAKE_CASE = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'fr'
SCREAMING_SNAKE_CASE = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , snake_case__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
SCREAMING_SNAKE_CASE = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 673 |
def __lowerCAmelCase ( _UpperCamelCase : int = 10_00 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 2**power
SCREAMING_SNAKE_CASE = str(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
for i in list_num:
sum_of_num += int(_UpperCamelCase )
return sum_of_num
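# Worked example (sketch): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
#
# >>> sum(int(d) for d in str(2**15))
# 26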
if __name__ == "__main__":
a_ : List[str] = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
a_ : int = solution(power)
print("Sum of the digits is: ", result)
| 673 | 1 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class UpperCamelCase ( unittest.TestCase ):
__UpperCamelCase =inspect.getfile(accelerate.test_utils )
__UpperCamelCase =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] )
__UpperCamelCase =["accelerate", "launch"]
__UpperCamelCase =Path.home() / ".cache/huggingface/accelerate"
__UpperCamelCase ="default_config.yaml"
__UpperCamelCase =config_folder / config_file
__UpperCamelCase =config_folder / "_default_config.yaml"
__UpperCamelCase =Path("tests/test_configs" )
@classmethod
def UpperCamelCase ( cls : str ):
"""simple docstring"""
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def UpperCamelCase ( cls : Optional[int] ):
"""simple docstring"""
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=snake_case__ ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(snake_case__ ), self.test_file_path] , env=os.environ.copy() )
def UpperCamelCase ( self : str ):
"""simple docstring"""
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )
class UpperCamelCase ( unittest.TestCase ):
__UpperCamelCase ="test-tpu"
__UpperCamelCase ="us-central1-a"
__UpperCamelCase ="ls"
__UpperCamelCase =["accelerate", "tpu-config"]
__UpperCamelCase ="cd /usr/share"
__UpperCamelCase ="tests/test_samples/test_command_file.sh"
__UpperCamelCase ="Running gcloud compute tpus tpu-vm ssh"
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=snake_case__ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , snake_case__ , )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=snake_case__ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , snake_case__ , )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=snake_case__ )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , snake_case__ , )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=snake_case__ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , snake_case__ , )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=snake_case__ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , snake_case__ , )
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=snake_case__ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , snake_case__ , )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=snake_case__ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , snake_case__ , )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=snake_case__ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , snake_case__ , )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=snake_case__ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , snake_case__ , )
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="facebook/bart-large-mnli"
__UpperCamelCase =(
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__UpperCamelCase ="text_classifier"
__UpperCamelCase =AutoTokenizer
__UpperCamelCase =AutoModelForSequenceClassification
__UpperCamelCase =["text", ["text"]]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().setup()
SCREAMING_SNAKE_CASE = self.model.config
SCREAMING_SNAKE_CASE = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
SCREAMING_SNAKE_CASE = int(snake_case__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(snake_case__ ) , [F"""This example is {label}""" for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 673 | 1 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
a_ : List[str] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
a_ : Union[str, Any] = parser.parse_args()
a_ : Optional[int] = "cpu"
a_ : List[Any] = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
a_ : Tuple = "path-to-your-trained-model"
a_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
a_ : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
a_ : str = pipe.to(device)
# to channels last
a_ : Tuple = pipe.unet.to(memory_format=torch.channels_last)
a_ : Optional[Any] = pipe.vae.to(memory_format=torch.channels_last)
a_ : str = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
a_ : Dict = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
a_ : List[Any] = torch.randn(2, 4, 64, 64)
a_ : List[str] = torch.rand(1) * 999
a_ : Optional[Any] = torch.randn(2, 77, 768)
a_ : List[str] = (sample, timestep, encoder_hidden_status)
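# ipex can specialize the UNet for this example input; if tracing with a sample
# input fails, fall back to the generic optimization below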
try:
a_ : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
a_ : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
a_ : Any = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
a_ : Union[str, Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
a_ : List[str] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
a_ : Union[str, Any] = 666
a_ : Any = torch.Generator(device).manual_seed(seed)
a_ : Optional[int] = {"generator": generator}
if args.steps is not None:
a_ : Any = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
a_ : Union[str, Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 673 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a_ : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a_ : int = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
a_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ : List[Any] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a_ : Any = "allenai"
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Any:
    '''simple docstring'''
    d = _UpperCamelCase
    da = dict((re.sub(R'@@$' , '' , k ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , k ), v) for k, v in d.items() )
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"""{k}</w>"""]
        da[k] = d[k]  # restore
    return da
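# Example of the key rewrite above: {"lo@@": 5, "low": 6} -> {"lo": 5, "low</w>": 6};
# BPE continuation markers ("@@") are stripped and word-final tokens gain "</w>".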
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
assert os.path.exists(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE = cls.hub_models()
SCREAMING_SNAKE_CASE = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
SCREAMING_SNAKE_CASE = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
SCREAMING_SNAKE_CASE = hub_utils.from_pretrained(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , archive_map=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vars(chkpt['args']['model'] )
SCREAMING_SNAKE_CASE = args['source_lang']
SCREAMING_SNAKE_CASE = args['target_lang']
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
# dicts
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{src_lang}.txt""" )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{tgt_lang}.txt""" )
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-src.json' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE = False
break
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-tgt.json' )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.exists(_UpperCamelCase ):
break
with open(_UpperCamelCase , encoding='utf-8' ) as fin:
SCREAMING_SNAKE_CASE = fin.read()
SCREAMING_SNAKE_CASE = re.sub(R' \d+$' , '' , _UpperCamelCase , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_UpperCamelCase )
# model config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
SCREAMING_SNAKE_CASE = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE = 5
SCREAMING_SNAKE_CASE = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE = best_score_hparams[model_dir]['length_penalty']
else:
SCREAMING_SNAKE_CASE = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# model
SCREAMING_SNAKE_CASE = chkpt['models'][0]
SCREAMING_SNAKE_CASE = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration(_UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
# save
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_UpperCamelCase , _UpperCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ : int = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 673 | 1 |
from __future__ import annotations
def _ceil_index ( v : list[int] , l : int , r : int , key : int ) -> int:  # noqa: E741
    '''simple docstring'''
    # binary search: smallest index in (l, r] whose value is >= key
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def __lowerCAmelCase ( _UpperCamelCase : list[int] ) -> int:
    '''simple docstring'''
    # longest increasing subsequence in O(n log n) via the patience-sorting "tails" array
    v = _UpperCamelCase
    if len(v ) == 0:
        return 0
    tail = [0] * len(v )
    length = 1
    tail[0] = v[0]
    for i in range(1 , len(v ) ):
        if v[i] < tail[0]:
            # a new smallest value starts a fresh length-1 candidate
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] tightens the tail of the best shorter subsequence
            tail[_ceil_index(tail , -1 , length - 1 , v[i] )] = v[i]
    return length
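# Example: for v = [10, 22, 9, 33, 21, 50, 41, 60, 80] the longest increasing
# subsequence is 10, 22, 33, 50, 60, 80, so the function returns 6.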
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 |
import random
def __lowerCAmelCase ( _UpperCamelCase : int , probability : float , directed : bool = False ) -> dict:
    '''simple docstring'''
    graph = {i: [] for i in range(_UpperCamelCase )}
    # if probability is greater than or equal to 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(_UpperCamelCase )
    # if probability is less than or equal to 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each pair of nodes, add an edge from i to j
    # if the randomly generated number is lower than the probability
    for i in range(_UpperCamelCase ):
        for j in range(i + 1 , _UpperCamelCase ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
                    graph[j].append(i )
    return graph
def complete_graph ( _UpperCamelCase : int ) -> dict:
    '''simple docstring'''
    return {
        i: [j for j in range(_UpperCamelCase ) if i != j] for i in range(_UpperCamelCase )
    }
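# Example: complete_graph(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}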
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 1 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger("transformers.models.speecht5")
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
hf_model.apply_weight_norm()
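    # the original checkpoint stores every weight-normalised conv layer as a
    # (weight_g, weight_v) pair plus a bias, keyed by layer name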
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None , ) -> Tuple:
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig.from_pretrained(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = np.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
model.save_pretrained(_UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
a_ : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 673 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any]=1_3 , snake_case__ : Union[str, Any]=7 , snake_case__ : List[str]=True , snake_case__ : Any=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=9_9 , snake_case__ : str=3_2 , snake_case__ : Dict=5 , snake_case__ : str=4 , snake_case__ : int=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : List[Any]=1_6 , snake_case__ : str=2 , snake_case__ : int=0.02 , snake_case__ : List[str]=3 , snake_case__ : Dict=4 , snake_case__ : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
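        # replicate each input once per answer choice: (batch, seq_len) -> (batch, num_choices, seq_len)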
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'the [MASK] of Belgium is Brussels'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) , 'capital' )
| 673 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ : int = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Any = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
a_ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 673 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
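    # fall back to dummy placeholder objects that raise an informative
    # error at use time when torch/transformers are unavailable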
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 673 | 1 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : str = logging.get_logger(__name__)
a_ : List[Any] = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="owlvit_text_model"
def __init__( self : int , snake_case__ : int=4_9_4_0_8 , snake_case__ : str=5_1_2 , snake_case__ : str=2_0_4_8 , snake_case__ : Optional[Any]=1_2 , snake_case__ : List[Any]=8 , snake_case__ : str=1_6 , snake_case__ : int="quick_gelu" , snake_case__ : int=1E-5 , snake_case__ : Union[str, Any]=0.0 , snake_case__ : List[Any]=0.02 , snake_case__ : Optional[Any]=1.0 , snake_case__ : Optional[int]=0 , snake_case__ : Any=4_9_4_0_6 , snake_case__ : str=4_9_4_0_7 , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = initializer_factor
@classmethod
def UpperCamelCase ( cls : List[str] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : str ):
"""simple docstring"""
cls._set_token_in_kwargs(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
SCREAMING_SNAKE_CASE = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="owlvit_vision_model"
def __init__( self : List[str] , snake_case__ : str=7_6_8 , snake_case__ : Any=3_0_7_2 , snake_case__ : str=1_2 , snake_case__ : Union[str, Any]=1_2 , snake_case__ : Dict=3 , snake_case__ : Dict=7_6_8 , snake_case__ : Dict=3_2 , snake_case__ : str="quick_gelu" , snake_case__ : Optional[int]=1E-5 , snake_case__ : Union[str, Any]=0.0 , snake_case__ : Any=0.02 , snake_case__ : Optional[int]=1.0 , **snake_case__ : int , ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = initializer_factor
@classmethod
def UpperCamelCase ( cls : Any , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
cls._set_token_in_kwargs(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
SCREAMING_SNAKE_CASE = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="owlvit"
__UpperCamelCase =True
def __init__( self : Union[str, Any] , snake_case__ : int=None , snake_case__ : str=None , snake_case__ : Dict=5_1_2 , snake_case__ : int=2.6_592 , snake_case__ : Optional[Any]=True , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(**snake_case__ )
if text_config is None:
SCREAMING_SNAKE_CASE = {}
logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' )
if vision_config is None:
SCREAMING_SNAKE_CASE = {}
            logger.info('vision_config is None. Initializing the OwlViTVisionConfig with default values.' )
SCREAMING_SNAKE_CASE = OwlViTTextConfig(**snake_case__ )
SCREAMING_SNAKE_CASE = OwlViTVisionConfig(**snake_case__ )
SCREAMING_SNAKE_CASE = projection_dim
SCREAMING_SNAKE_CASE = logit_scale_init_value
SCREAMING_SNAKE_CASE = return_dict
SCREAMING_SNAKE_CASE = 1.0
@classmethod
def UpperCamelCase ( cls : str , snake_case__ : Union[str, os.PathLike] , **snake_case__ : List[str] ):
"""simple docstring"""
cls._set_token_in_kwargs(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cls.get_config_dict(snake_case__ , **snake_case__ )
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
@classmethod
def UpperCamelCase ( cls : str , snake_case__ : Dict , snake_case__ : Dict , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = text_config
SCREAMING_SNAKE_CASE = vision_config
return cls.from_dict(snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE = self.text_config.to_dict()
SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
@property
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
] )
@property
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return OrderedDict(
[
('logits_per_image', {0: 'batch'}),
('logits_per_text', {0: 'batch'}),
('text_embeds', {0: 'batch'}),
('image_embeds', {0: 'batch'}),
] )
@property
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return 1E-4
def UpperCamelCase ( self : int , snake_case__ : "ProcessorMixin" , snake_case__ : int = -1 , snake_case__ : int = -1 , snake_case__ : Optional["TensorType"] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super().generate_dummy_inputs(
processor.tokenizer , batch_size=snake_case__ , seq_length=snake_case__ , framework=snake_case__ )
SCREAMING_SNAKE_CASE = super().generate_dummy_inputs(
processor.image_processor , batch_size=snake_case__ , framework=snake_case__ )
return {**text_input_dict, **image_input_dict}
@property
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return 1_4
| 673 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue :
    def __init__( self : Dict ):
        """simple docstring"""
        self.elements = []
        self.set = set()
    def minkey( self : List[Any] ):
        """simple docstring"""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('inf' )
    def empty( self : List[str] ):
        """simple docstring"""
        return len(self.elements ) == 0
    def put( self : Union[str, Any] , item : TPos , priority : float ):
        """simple docstring"""
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
        else:
            # the item is already queued: pop entries until it is found,
            # update its priority, then push everything back
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri, x) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )
    def remove_element( self : Dict , item : TPos ):
        """simple docstring"""
        if item in self.set:
            self.set.remove(item )
            temp = []
            (pro, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                (pro, x) = heapq.heappop(self.elements )
            for prito, yyy in temp:
                heapq.heappush(self.elements , (prito, yyy) )
    def top_show( self : str ):
        """simple docstring"""
        return self.elements[0][1]
    def get( self : Tuple ):
        """simple docstring"""
        (priority, item) = heapq.heappop(self.elements )
        self.set.remove(item )
        return (priority, item)
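# Euclidean (straight-line) distance between two grid cells; used as the consistent heuristic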
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
return np.linalg.norm(a - b )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Dict:
'''simple docstring'''
return consistent_heuristic(_UpperCamelCase , _UpperCamelCase ) // t
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[int]:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : int , _UpperCamelCase : TPos , _UpperCamelCase : dict[TPos, float] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = g_function[start] + Wa * heuristics[i](_UpperCamelCase , _UpperCamelCase )
return ans
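# priority of state s in queue i: g(s) + Wa * h_i(s, goal), i.e. weighted (inflated) A*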
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.chararray((n, n) )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = '*'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (j, (n - 1) - i) in blocks:
SCREAMING_SNAKE_CASE = '#'
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = x
# print(x)
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[x]
SCREAMING_SNAKE_CASE = '-'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
print(_UpperCamelCase , end=' ' )
SCREAMING_SNAKE_CASE = back_pointer[x]
print(_UpperCamelCase )
sys.exit()
def __lowerCAmelCase ( _UpperCamelCase : TPos ) -> Any:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , ) -> List[Any]:
'''simple docstring'''
for itera in range(_UpperCamelCase ):
open_list[itera].remove_element(_UpperCamelCase )
# print("s", s)
# print("j", j)
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = s
SCREAMING_SNAKE_CASE = (x - 1, y)
SCREAMING_SNAKE_CASE = (x + 1, y)
SCREAMING_SNAKE_CASE = (x, y + 1)
SCREAMING_SNAKE_CASE = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = float('inf' )
if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
SCREAMING_SNAKE_CASE = g_function[s] + 1
SCREAMING_SNAKE_CASE = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCamelCase ):
if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ):
open_list[j].put(
_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a_ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ : List[str] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a_ : Union[str, Any] = make_common_ground()
a_ : Tuple = blocks_blk
# hyper parameters
a_ : Any = 1
a_ : List[str] = 1
a_ : Union[str, Any] = 20
a_ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a_ : int = (0, 0)
a_ : Optional[int] = (n - 1, n - 1)
a_ : Union[str, Any] = 1
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos , _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {start: 0, goal: float('inf' )}
SCREAMING_SNAKE_CASE = {start: -1, goal: -1}
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
for i in range(_UpperCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , _UpperCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = open_list[i].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_inad.append(_UpperCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = open_list[0].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_anchor.append(_UpperCamelCase )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 673 | 1 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =AudioLDMPipeline
__UpperCamelCase =TEXT_TO_AUDIO_PARAMS
__UpperCamelCase =TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=snake_case__ , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
SCREAMING_SNAKE_CASE = ClapTextModelWithProjection(snake_case__ )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 )
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case__ , )
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : int , snake_case__ : int=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
        SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
        output_text = audioldm_pipe(**snake_case__ )
        audio_text = output_text.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
        output_embeds = audioldm_pipe(**snake_case__ )
        audio_embeds = output_embeds.audios[0]
        # the raw-prompt run and the precomputed-embeddings run must match closely
        assert np.abs(audio_text - audio_embeds ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
        SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
        output_text = audioldm_pipe(**snake_case__ )
        audio_text = output_text.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
embeds.append(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = embeds
# forward
        output_embeds = audioldm_pipe(**snake_case__ )
        audio_embeds = output_embeds.audios[0]
        # the raw-prompt run and the precomputed-embeddings run must match closely
        assert np.abs(audio_text - audio_embeds ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'egg cracking'
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ , negative_prompt=snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.016 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.032 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.032
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = ['hey']
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
assert audio_shape == (1, 2_5_6)
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case__ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ )
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Tuple="cpu" , snake_case__ : List[str]=torch.floataa , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = np.random.RandomState(snake_case__ ).standard_normal((1, 8, 1_2_8, 1_6) )
SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 2_5
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[7_7_2_3_0:7_7_2_4_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[2_7_7_8_0:2_7_7_9_0]
SCREAMING_SNAKE_CASE = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
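# A hedged end-to-end sketch of the pipeline exercised by the tests above. The
# "cvssp/audioldm" checkpoint name and the generation arguments come from the
# slow tests; the float16/cuda placement and the scipy-based WAV export are
# assumptions for illustration, not part of the test file.
import scipy.io.wavfile
import torch
from diffusers import AudioLDMPipeline
pipe = AudioLDMPipeline.from_pretrained('cvssp/audioldm', torch_dtype=torch.float16)
pipe = pipe.to('cuda')
audio = pipe(
    'A hammer hitting a wooden surface',
    num_inference_steps=10,
    audio_length_in_s=5.0,
).audios[0]
# The checkpoint's vocoder runs at 16 kHz, which fixes the WAV header rate.
scipy.io.wavfile.write('hammer.wav', rate=16_000, data=audio)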
| 673 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : str = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
        >>> from diffusers.utils import load_image
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "A red cartoon frog, 4k"
        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)

        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/frog.png"
        ... )

        >>> image = pipe(
        ...     image=init_image,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ...     strength=0.2,
        ... ).images

        >>> image[0].save("red_frog.png")
        ```
"""
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Any=8 ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=5_12 , _UpperCamelCase : Union[str, Any]=5_12 ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
SCREAMING_SNAKE_CASE = np.array(pil_image.convert('RGB' ) )
SCREAMING_SNAKE_CASE = arr.astype(np.floataa ) / 1_27.5 - 1
SCREAMING_SNAKE_CASE = np.transpose(_UpperCamelCase , [2, 0, 1] )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : int , snake_case__ : UNetaDConditionModel , snake_case__ : DDPMScheduler , snake_case__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=snake_case__ , scheduler=snake_case__ , movq=snake_case__ , )
SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase ( self : Any , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = min(int(num_inference_steps * strength ) , snake_case__ )
SCREAMING_SNAKE_CASE = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase ( self : List[str] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str=None ):
"""simple docstring"""
if not isinstance(snake_case__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case__ )}""" )
SCREAMING_SNAKE_CASE = image.to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE = image
else:
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case__ )
]
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
else:
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ ).latent_dist.sample(snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE = init_latents.shape
SCREAMING_SNAKE_CASE = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ )
# get latents
SCREAMING_SNAKE_CASE = self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = init_latents
return latents
def UpperCamelCase ( self : int , snake_case__ : List[str]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
SCREAMING_SNAKE_CASE = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=snake_case__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cpu_offload_with_hook(snake_case__ , snake_case__ , prev_module_hook=snake_case__ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case__ )
def __call__( self : str , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : int = 5_1_2 , snake_case__ : int = 5_1_2 , snake_case__ : int = 1_0_0 , snake_case__ : float = 4.0 , snake_case__ : float = 0.3 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._execution_device
SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = image_embeds.shape[0]
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [image]
        if not all(isinstance(i , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                F"""Input is in incorrect format: {[type(i ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
SCREAMING_SNAKE_CASE = torch.cat([prepare_image(snake_case__ , snake_case__ , snake_case__ ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE = image.to(dtype=image_embeds.dtype , device=snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ )['latents']
SCREAMING_SNAKE_CASE = latents.repeat_interleave(snake_case__ , dim=0 )
self.scheduler.set_timesteps(snake_case__ , device=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_timesteps(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor )
SCREAMING_SNAKE_CASE = self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , image_embeds.dtype , snake_case__ , snake_case__ )
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE = {'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE = self.unet(
sample=snake_case__ , timestep=snake_case__ , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE = self.scheduler.step(
snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ , )[0]
# post-processing
SCREAMING_SNAKE_CASE = self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
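# A worked example of the two sizing/scheduling helpers above, restated in
# isolation. With the defaults (512x512 target, movq scale factor 8, 100
# steps, strength 0.3) the latent grid is 64x64 and only the last 30
# scheduler steps are executed.
def latent_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
print(latent_height_and_width(512, 512))  # (64, 64)
num_inference_steps, strength = 100, 0.3
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
print(num_inference_steps - t_start)  # 30 denoising steps actually run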
| 673 | 1 |
from functools import reduce
a_ : str = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def __lowerCAmelCase ( _UpperCamelCase : str = N ) -> int:
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , _UpperCamelCase[i : i + 13] ) )
for i in range(len(_UpperCamelCase ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
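# The reduce-based solution above can be cross-checked against a plain
# loop-based formulation of the same 13-digit sliding window; both scan the
# digit string N defined above.
def solution_loop(n: str = N, window: int = 13) -> int:
    best = 0
    for i in range(len(n) - window + 1):
        product = 1
        for digit in n[i : i + window]:
            product *= int(digit)
        best = max(best, product)
    return best
print(solution_loop())  # 23514624000, the known answer for Project Euler 8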
| 673 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger("transformers.models.speecht5")
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None , ) -> Tuple:
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig.from_pretrained(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = np.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
model.save_pretrained(_UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
a_ : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
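# A minimal illustration of the weight-norm bookkeeping the loader above
# relies on: wrapping a conv in weight_norm splits `weight` into
# `weight_g`/`weight_v`, which is why the checkpoint keys carry those
# suffixes. The layer shapes here are placeholders.
import torch
from torch import nn
from torch.nn.utils import remove_weight_norm, weight_norm
conv = weight_norm(nn.Conv1d(80, 512, kernel_size=7))
print(sorted(name for name, _ in conv.named_parameters()))
# ['bias', 'weight_g', 'weight_v'] -- matches the checkpoint key layout
remove_weight_norm(conv)  # fuses g and v back into a single `weight`
print(sorted(name for name, _ in conv.named_parameters()))
# ['bias', 'weight']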
| 673 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : str ) -> Union[str, Any]:
'''simple docstring'''
with open(_UpperCamelCase ) as metadata_file:
SCREAMING_SNAKE_CASE = json.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = LukeConfig(use_entity_aware_attention=_UpperCamelCase , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase , map_location='cpu' )['module']
# Load the entity vocab file
SCREAMING_SNAKE_CASE = load_original_entity_vocab(_UpperCamelCase )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE = AddedToken('<ent>' , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase )
SCREAMING_SNAKE_CASE = AddedToken('<ent2>' , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(_UpperCamelCase )
with open(os.path.join(_UpperCamelCase , 'tokenizer_config.json' ) , 'r' ) as f:
SCREAMING_SNAKE_CASE = json.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 'MLukeTokenizer'
with open(os.path.join(_UpperCamelCase , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase )
with open(os.path.join(_UpperCamelCase , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(_UpperCamelCase )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(['@'] )[0]
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(['#'] )[0]
SCREAMING_SNAKE_CASE = state_dict['embeddings.word_embeddings.weight']
SCREAMING_SNAKE_CASE = word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE = state_dict[bias_name]
SCREAMING_SNAKE_CASE = decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE = f"""encoder.layer.{layer_index}.attention.self."""
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE = state_dict['entity_embeddings.entity_embeddings.weight']
SCREAMING_SNAKE_CASE = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE = state_dict['entity_predictions.bias']
SCREAMING_SNAKE_CASE = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE = LukeForMaskedLM(config=_UpperCamelCase ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
SCREAMING_SNAKE_CASE = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
SCREAMING_SNAKE_CASE = state_dict[key]
else:
SCREAMING_SNAKE_CASE = state_dict[key]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
if set(_UpperCamelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(_UpperCamelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(_UpperCamelCase , task='entity_classification' )
SCREAMING_SNAKE_CASE = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
SCREAMING_SNAKE_CASE = (0, 9)
SCREAMING_SNAKE_CASE = tokenizer(_UpperCamelCase , entity_spans=[span] , return_tensors='pt' )
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE = torch.Size((1, 33, 7_68) )
SCREAMING_SNAKE_CASE = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _UpperCamelCase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE = torch.Size((1, 1, 7_68) )
SCREAMING_SNAKE_CASE = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _UpperCamelCase , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 'Tokyo is the capital of <mask>.'
SCREAMING_SNAKE_CASE = (24, 30)
SCREAMING_SNAKE_CASE = tokenizer(_UpperCamelCase , entity_spans=[span] , return_tensors='pt' )
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = encoding['input_ids'][0].tolist()
SCREAMING_SNAKE_CASE = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
SCREAMING_SNAKE_CASE = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(_UpperCamelCase ) )
model.save_pretrained(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['[MASK]', '[PAD]', '[UNK]']
SCREAMING_SNAKE_CASE = [json.loads(_UpperCamelCase ) for line in open(_UpperCamelCase )]
SCREAMING_SNAKE_CASE = {}
for entry in data:
SCREAMING_SNAKE_CASE = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE = entity_id
break
SCREAMING_SNAKE_CASE = f"""{language}:{entity_name}"""
SCREAMING_SNAKE_CASE = entity_id
return new_mapping
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
a_ : List[str] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
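# The conversion above grows several embedding matrices by one row (e.g. the
# [MASK2] entity). Here is the pattern in isolation, with illustrative shapes
# rather than the real checkpoint's:
import torch
entity_emb = torch.randn(10, 768)  # pretrained entity embeddings
mask_id = 3  # hypothetical index of "[MASK]" in the entity vocab
mask2_row = entity_emb[mask_id].unsqueeze(0)  # initialise [MASK2] from [MASK]
entity_emb = torch.cat([entity_emb, mask2_row])  # "[MASK2]" gets id 10
print(entity_emb.shape)  # torch.Size([11, 768])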
| 673 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a_ : List[Any] = {
"allenai/led-base-16384": 1_6384,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =LEDTokenizer
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Dict="replace" , snake_case__ : Tuple="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : List[Any]=False , snake_case__ : int=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs['global_attention_mask'] ) != len(snake_case__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(snake_case__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
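# A worked example of the `global_attention_mask` padding branch above,
# outside the tokenizer. The id values are placeholders; -1 marks "local
# attention" for positions that were added as padding.
input_ids = [0, 8_274, 2]
padded_ids = input_ids + [1, 1]  # pad to length 5 (pad_token_id = 1)
global_attention_mask = [1, 0, 0]  # global attention on the first token only
difference = len(padded_ids) - len(global_attention_mask)
global_attention_mask = global_attention_mask + [-1] * difference  # right padding
print(global_attention_mask)  # [1, 0, 0, -1, -1]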
| 673 | 1 |
from __future__ import annotations
a_ : Optional[Any] = list[list[int]]
# assigning initial values to the grid
a_ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
a_ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def __lowerCAmelCase ( _UpperCamelCase : Matrix , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : Matrix ) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def __lowerCAmelCase ( _UpperCamelCase : Matrix ) -> Matrix | None:
'''simple docstring'''
if location := find_empty_location(_UpperCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = digit
if sudoku(_UpperCamelCase ) is not None:
return grid
SCREAMING_SNAKE_CASE = 0
return None
def __lowerCAmelCase ( _UpperCamelCase : Matrix ) -> None:
'''simple docstring'''
for row in grid:
for cell in row:
print(_UpperCamelCase , end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
a_ : Union[str, Any] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
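# A quick sanity check of the safety predicate on `initial_grid`, assuming the
# helpers are reachable under the names used at their call sites, with the
# signature is_safe(grid, row, column, n).
assert not is_safe(initial_grid, 0, 1, 3)  # 3 already sits in row 0
assert not is_safe(initial_grid, 2, 0, 3)  # 3 already sits in column 0
assert is_safe(initial_grid, 0, 1, 1)  # no 1 in row 0, column 1, or the box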
| 673 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 srun task per node; torch.distributed.run spawns one process per GPU
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(_UpperCamelCase , 'r' ) as fh:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX )
try:
print(*_UpperCamelCase )
finally:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN )
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a_ : Dict = dist.get_rank()
a_ : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
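# A hedged companion check (not part of the original script): after
# init_process_group succeeds, all_gather should return one distinct rank id
# per process. It can be appended inside the try block above and run with the
# same torch.distributed.run command from the header comments.
def check_all_gather(device: torch.device) -> None:
    world_size = dist.get_world_size()
    mine = torch.tensor([dist.get_rank()], device=device)
    gathered = [torch.zeros_like(mine) for _ in range(world_size)]
    dist.all_gather(gathered, mine)
    assert sorted(t.item() for t in gathered) == list(range(world_size))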
| 673 | 1 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
@property
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ort.SessionOptions()
SCREAMING_SNAKE_CASE = False
return options
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
SCREAMING_SNAKE_CASE = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A red cat sitting on a park bench'
SCREAMING_SNAKE_CASE = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=snake_case__ , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
SCREAMING_SNAKE_CASE = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting' , subfolder='scheduler' , revision='onnx' )
SCREAMING_SNAKE_CASE = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A red cat sitting on a park bench'
SCREAMING_SNAKE_CASE = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE = pipe(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , guidance_scale=7.5 , num_inference_steps=2_0 , generator=snake_case__ , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
SCREAMING_SNAKE_CASE = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
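# The provider tuple used above is plain onnxruntime configuration, so the
# same options work on a bare InferenceSession. The model path below is a
# placeholder, and `enable_mem_pattern` is the flag the test's sess_options
# toggles off.
import onnxruntime as ort
options = ort.SessionOptions()
options.enable_mem_pattern = False
provider = (
    'CUDAExecutionProvider',
    {'gpu_mem_limit': '15000000000', 'arena_extend_strategy': 'kSameAsRequested'},
)
session = ort.InferenceSession('unet/model.onnx', options, providers=[provider])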
| 673 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
a_ : Union[str, Any] = {"allegro/herbert-base-cased": 514}
a_ : List[Any] = {}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =HerbertTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : List[str]="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
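# The three methods above only arrange id layouts, so the arithmetic can be
# checked without a vocabulary. The cls/sep ids and sequence ids below are
# placeholders.
cls_id, sep_id = 0, 2
seq_a, seq_b = [11, 12, 13], [21, 22]
pair = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
print(pair)  # [0, 11, 12, 13, 2, 21, 22, 2]
special_mask = [1] + [0] * len(seq_a) + [1] + [0] * len(seq_b) + [1]
assert len(special_mask) == len(pair)
token_type_ids = len([cls_id] + seq_a + [sep_id]) * [0] + len(seq_b + [sep_id]) * [1]
assert token_type_ids == [0, 0, 0, 0, 0, 1, 1, 1]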
| 673 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Any = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, **kwargs):
        """Configuration for the Donut Swin vision encoder."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
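# Hedged usage sketch (illustrative): with the defaults above, the derived
# hidden size is embed_dim doubled once per stage after the first.
#
#   config = DonutSwinConfig()
#   assert config.hidden_size == 96 * 2 ** (len(config.depths) - 1)  # == 768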
| 673 |
def sum_of_digits(n: int) -> int:
    """
    Find the sum of the decimal digits of ``n`` iteratively.
    >>> sum_of_digits(12345)
    15
    >>> sum_of_digits(-123)
    6
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    """
    Find the sum of the decimal digits of ``n`` recursively.
    >>> sum_of_digits_recursion(12345)
    15
    """
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact(n: int) -> int:
    """
    Find the sum of the decimal digits of ``n`` in one expression.
    >>> sum_of_digits_compact(12345)
    15
    """
    return sum(int(c) for c in str(abs(n)))
def benchmark() -> None:
    """Benchmark the three implementations on ints of increasing length."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
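# Hedged aside (not in the original file): repeatedly applying any of the
# digit-sum functions converges to the digital root, which for n > 0 equals
# 1 + (n - 1) % 9. A quick self-check under that claim:
#
#   n = 262144
#   while n >= 10:
#       n = sum_of_digits(n)
#   assert n == 1 + (262144 - 1) % 9  # both sides are 1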
| 673 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]
    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        """Feature extractor that turns raw audio into CLAP log-mel spectrogram features."""
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk")
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney")
    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a dict, dropping the large mel filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        """Compute the log-mel spectrogram of `waveform` with the given mel filter bank."""
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB")
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Build a 4-channel fused mel: one globally shrunk copy plus three random chunks."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding):
        """Truncate or pad `waveform` to `max_length`, returning mel features and a longer-than-max flag."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        """Featurize one or several raw waveforms into `input_features` and `is_longer` flags."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float32) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
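# Hedged usage sketch (illustrative, not from the original file): featurizing
# one second of 48 kHz silence with the defaults above ("fusion"/"repeatpad").
# The output shape is an estimate derived from the parameters shown here.
#
#   import numpy as np
#   extractor = ClapFeatureExtractor()
#   features = extractor(np.zeros(48_000, dtype=np.float32), sampling_rate=48_000, return_tensors="np")
#   # features["input_features"] ~ (1, 4, 1001, 64): 4 stacked mel views, 64 mel bins
#   # features["is_longer"]      ~ [[False]]: shorter than the 10 s max length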
| 673 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 673 | 1 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
a_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 673 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32)
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechTaHifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False)
        vocoder = SpeechTaHifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
embeds.append(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'egg cracking'
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ , negative_prompt=snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.016 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.032 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.032
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = ['hey']
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
assert audio_shape == (1, 2_5_6)
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case__ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ )
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Deterministic inputs (fixed latents) for the slow integration tests."""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
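# Hedged end-to-end sketch (assumes the public `cvssp/audioldm` checkpoint and a
# CUDA device, mirroring the slow tests above):
#
#   pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm").to("cuda")
#   audio = pipe("A hammer hitting a wooden surface", num_inference_steps=25).audios[0]
#   # `audio` is a 1-D numpy waveform at pipe.vocoder.config.sampling_rate (16 kHz here)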
| 673 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width is lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    """
    Get images list and annotations list from the input dir, build mosaic
    images with updated annotations, and save both to the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE)
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """
    Read every label file in `label_dir` and collect (image path, bboxes) pairs,
    converting boxes from YOLO center/size format to corner format.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(all_img_list: list, all_annos: list, idxs: list[int], output_size: tuple[int, int], scale_range: tuple[float, float], filter_scale: float = 0.0) -> tuple[list, list, str]:
    """
    Place four randomly chosen images on one mosaic canvas and rescale their
    annotations accordingly; returns (output image, new annotations, first path).
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    """Generate a random lowercase+digit string of length `number_char`."""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
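# Hedged note (not in the original file): each output label line follows the
# YOLO convention used above, i.e. "<class> <x_center> <y_center> <width> <height>"
# with coordinates normalized to [0, 1]. A quick round-trip check:
#
#   xmin, ymin, xmax, ymax = 0.2, 0.3, 0.6, 0.9
#   x_c, y_c, w, h = (xmin + xmax) / 2, (ymin + ymax) / 2, xmax - xmin, ymax - ymin
#   assert (x_c - w / 2, y_c - h / 2, x_c + w / 2, y_c + h / 2) == (xmin, ymin, xmax, ymax)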
| 673 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True)
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
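# Hedged configuration sketch (mirrors the parameterized test above; assumes
# `rope_scaling` is the config field consumed by the scaled RoPE embeddings):
#
#   config = OpenLlamaConfig()
#   config.rope_scaling = {"type": "linear", "factor": 10.0}
#   model = OpenLlamaModel(config)  # positions beyond the trained range are rescaled by 1/10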
| 673 | 1 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
    def to_dict(self):
        """Serialize to a dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
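# Hedged usage sketch (illustrative): `attribute_map` above redirects the
# generic config names onto the DETR-style ones.
#
#   config = ConditionalDetrConfig()                 # resnet50 backbone, 300 queries
#   assert config.hidden_size == config.d_model == 256
#   assert config.num_attention_heads == config.encoder_attention_heads == 8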
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]
    def encode(self, audio):
        """Convert raw audio into Whisper input features."""
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        """Run Whisper generation on the encoded features."""
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        """Decode generated token IDs into the transcribed string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
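# Hedged usage sketch (assumes the PipelineTool base class loads the checkpoint
# lazily on first call; `audio` is any raw waveform WhisperProcessor accepts):
#
#   transcriber = SpeechToTextTool()
#   text = transcriber(audio)  # encode -> forward -> decode under the hood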
| 673 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        """Formatter that converts rows/columns/batches to `jax.Array` values on a chosen device."""
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`.")
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}.")
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> dict:
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()
        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
def UpperCamelCase ( self : str , snake_case__ : Dict ):
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(snake_case__ , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(snake_case__ , '__array__' ) and not isinstance(snake_case__ , jax.Array ):
SCREAMING_SNAKE_CASE = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(snake_case__ , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(snake_case__ ) for substruct in data_struct] )
elif isinstance(snake_case__ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(snake_case__ ) for substruct in data_struct] )
return self._tensorize(snake_case__ )
def UpperCamelCase ( self : str , snake_case__ : dict ):
"""simple docstring"""
return map_nested(self._recursive_tensorize , snake_case__ , map_list=snake_case__ )
def UpperCamelCase ( self : int , snake_case__ : pa.Table ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.numpy_arrow_extractor().extract_row(snake_case__ )
SCREAMING_SNAKE_CASE = self.python_features_decoder.decode_row(snake_case__ )
return self.recursive_tensorize(snake_case__ )
def UpperCamelCase ( self : Any , snake_case__ : pa.Table ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.numpy_arrow_extractor().extract_column(snake_case__ )
SCREAMING_SNAKE_CASE = self.python_features_decoder.decode_column(snake_case__ , pa_table.column_names[0] )
SCREAMING_SNAKE_CASE = self.recursive_tensorize(snake_case__ )
SCREAMING_SNAKE_CASE = self._consolidate(snake_case__ )
return column
def UpperCamelCase ( self : Tuple , snake_case__ : pa.Table ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.numpy_arrow_extractor().extract_batch(snake_case__ )
SCREAMING_SNAKE_CASE = self.python_features_decoder.decode_batch(snake_case__ )
SCREAMING_SNAKE_CASE = self.recursive_tensorize(snake_case__ )
for column_name in batch:
SCREAMING_SNAKE_CASE = self._consolidate(batch[column_name] )
return batch
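# Hedged usage sketch (not part of the original module): this formatter backs
# `Dataset.with_format("jax")` in `datasets`. Assuming `jax` is installed,
# formatted access returns `jax.Array` values, float32 by default as above:
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#   ds[0]["x"]  # -> jax.Array of dtype float32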
| 673 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
a_ : List[str] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
a_ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
a_ : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
a_ : int = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def UpperCamelCase ( self : Dict , snake_case__ : int ):
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[Any]=0.9 , snake_case__ : Optional[Any]=3 , snake_case__ : Any=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(
word_tokenize(snake_case__ ) , word_tokenize(snake_case__ ) , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
else:
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(snake_case__ , snake_case__ , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
return {"meteor": np.mean(snake_case__ )}
| 673 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : Any = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
a_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
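# Hedged usage note (a sketch, assuming the standard transformers lazy-import
# behavior wired up above): `_LazyModule` defers each submodule import until
# one of its attributes is first accessed, so config-only use never pulls in torch:
#
#   from transformers import BigBirdPegasusConfig  # materializes only the config module
#   config = BigBirdPegasusConfig()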
| 673 |
import numpy as np
def sigmoid ( vector : np.ndarray ) -> np.ndarray:
    """
    Elementwise logistic sigmoid: 1 / (1 + exp(-x)).

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector ))
def __lowerCAmelCase ( vector : np.ndarray ) -> np.ndarray:
    """
    Sigmoid linear unit (SiLU / swish): f(x) = x * sigmoid(x).

    >>> __lowerCAmelCase(np.array([0.0]))
    array([0.])
    """
    return vector * sigmoid(vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
a_ : int = logging.getLogger(__name__)
torch.set_grad_enabled(False)
a_ : Any = "cuda" if torch.cuda.is_available() else "cpu"
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : Any=1_00 , _UpperCamelCase : List[Any]=" " ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = text.split(_UpperCamelCase )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase )]
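# Hedged behavior note (assumption: behind the mangled names this is the
# original helper `split_text(text, n=100, character=" ")`): it splits on
# `character` and re-joins consecutive chunks of `n` words, so chunks of 2
# over "a b c d" would give ["a b", "c d"].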
def __lowerCAmelCase ( _UpperCamelCase : dict ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], []
for title, text in zip(documents['title'] , documents['text'] ):
if text is not None:
for passage in split_text(_UpperCamelCase ):
titles.append(title if title is not None else '' )
texts.append(_UpperCamelCase )
return {"title": titles, "text": texts}
def __lowerCAmelCase ( _UpperCamelCase : dict , _UpperCamelCase : DPRContextEncoder , _UpperCamelCase : DPRContextEncoderTokenizerFast ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=_UpperCamelCase , padding='longest' , return_tensors='pt' )['input_ids']
SCREAMING_SNAKE_CASE = ctx_encoder(input_ids.to(device=_UpperCamelCase ) , return_dict=_UpperCamelCase ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def __lowerCAmelCase ( _UpperCamelCase : "RagExampleArguments" , _UpperCamelCase : "ProcessingArguments" , _UpperCamelCase : "IndexHnswArguments" , ) -> int:
'''simple docstring'''
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
SCREAMING_SNAKE_CASE = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
SCREAMING_SNAKE_CASE = dataset.map(_UpperCamelCase , batched=_UpperCamelCase , num_proc=processing_args.num_proc )
# And compute the embeddings
SCREAMING_SNAKE_CASE = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_UpperCamelCase )
SCREAMING_SNAKE_CASE = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
SCREAMING_SNAKE_CASE = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
SCREAMING_SNAKE_CASE = dataset.map(
partial(_UpperCamelCase , ctx_encoder=_UpperCamelCase , ctx_tokenizer=_UpperCamelCase ) , batched=_UpperCamelCase , batch_size=processing_args.batch_size , features=_UpperCamelCase , )
# And finally save your dataset
SCREAMING_SNAKE_CASE = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(_UpperCamelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
SCREAMING_SNAKE_CASE = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' , custom_index=_UpperCamelCase )
# And save the index
SCREAMING_SNAKE_CASE = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(_UpperCamelCase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class UpperCamelCase :
__UpperCamelCase =field(
default=str(Path(SCREAMING_SNAKE_CASE ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
__UpperCamelCase =field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
__UpperCamelCase =field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
__UpperCamelCase =field(
default=str(Path(SCREAMING_SNAKE_CASE ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class UpperCamelCase :
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
__UpperCamelCase =field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class UpperCamelCase :
__UpperCamelCase =field(
default=7_68 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
__UpperCamelCase =field(
default=1_28 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
a_ : Any = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
a_ , a_ , a_ : int = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
a_ : int = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
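# Hedged CLI sketch (file names are placeholders; the script is
# `use_own_knowledge_dataset.py` in transformers' RAG research project):
#
#   python use_own_knowledge_dataset.py \
#       --csv_path ./my_docs.tsv \
#       --output_dir ./rag_knowledge_base
#
# This writes `my_knowledge_dataset` and `my_knowledge_dataset_hnsw_index.faiss`
# under the output directory, per the two steps above.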
| 673 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Dict = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="van"
def __init__( self : Optional[Any] , snake_case__ : Tuple=2_2_4 , snake_case__ : Dict=3 , snake_case__ : Union[str, Any]=[7, 3, 3, 3] , snake_case__ : str=[4, 2, 2, 2] , snake_case__ : Optional[Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , snake_case__ : Optional[Any]=[3, 3, 1_2, 3] , snake_case__ : Tuple=[8, 8, 4, 4] , snake_case__ : Any="gelu" , snake_case__ : Dict=0.02 , snake_case__ : List[str]=1E-6 , snake_case__ : int=1E-2 , snake_case__ : Any=0.0 , snake_case__ : Tuple=0.0 , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = strides
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_ratios
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = dropout_rate
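# Hedged usage sketch (assumption: this mirrors transformers' VanConfig, whose
# defaults match the values above):
#
#   from transformers import VanConfig
#   config = VanConfig()  # model_type == "van"
#   config = VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3])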
| 673 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
a_ : int = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
a_ : int = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
a_ : Optional[Any] = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
a_ : Dict = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =ElectraTokenizer
def __init__( self : Any , snake_case__ : List[str]=None , snake_case__ : Tuple=None , snake_case__ : str=True , snake_case__ : Tuple="[UNK]" , snake_case__ : Any="[SEP]" , snake_case__ : Any="[PAD]" , snake_case__ : str="[CLS]" , snake_case__ : Optional[int]="[MASK]" , snake_case__ : Optional[Any]=True , snake_case__ : int=None , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , snake_case__ ) != do_lower_case
or normalizer_state.get('strip_accents' , snake_case__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , snake_case__ ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE = getattr(snake_case__ , normalizer_state.pop('type' ) )
SCREAMING_SNAKE_CASE = do_lower_case
SCREAMING_SNAKE_CASE = strip_accents
SCREAMING_SNAKE_CASE = tokenize_chinese_chars
SCREAMING_SNAKE_CASE = normalizer_class(**snake_case__ )
SCREAMING_SNAKE_CASE = do_lower_case
def UpperCamelCase ( self : List[str] , snake_case__ : List[str] , snake_case__ : str=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase ( self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
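# Hedged usage sketch (assumption: this mirrors transformers' ElectraTokenizerFast
# and the hosted checkpoint below exists):
#
#   from transformers import ElectraTokenizerFast
#   tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   tok("Hello world")["input_ids"]  # [CLS] ... [SEP] ids, lower-cased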
| 673 |
def __lowerCAmelCase ( sentence : str , ngram_size : int ) -> list[str]:
    """
    Return all contiguous n-grams (substrings of length ngram_size) of the sentence.

    >>> __lowerCAmelCase('abcde', 2)
    ['ab', 'bc', 'cd', 'de']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 673 | 1 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 673 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Any , **snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(snake_case__ )
def __call__( self : List[Any] , snake_case__ : Union[str, "Image.Image", List[Dict[str, Any]]] , snake_case__ : Union[str, List[str]] = None , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
if "text_queries" in kwargs:
SCREAMING_SNAKE_CASE = kwargs.pop('text_queries' )
if isinstance(snake_case__ , (str, Image.Image) ):
SCREAMING_SNAKE_CASE = {'image': image, 'candidate_labels': candidate_labels}
else:
SCREAMING_SNAKE_CASE = image
SCREAMING_SNAKE_CASE = super().__call__(snake_case__ , **snake_case__ )
return results
def UpperCamelCase ( self : Union[str, Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
if "threshold" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['threshold']
if "top_k" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['top_k']
return {}, {}, postprocess_params
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(inputs['image'] )
SCREAMING_SNAKE_CASE = inputs['candidate_labels']
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = candidate_labels.split(',' )
        SCREAMING_SNAKE_CASE = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
for i, candidate_label in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE = self.tokenizer(snake_case__ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE = self.image_processor(snake_case__ , return_tensors=self.framework )
yield {
"is_last": i == len(snake_case__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase ( self : Any , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = model_inputs.pop('target_size' )
SCREAMING_SNAKE_CASE = model_inputs.pop('candidate_label' )
SCREAMING_SNAKE_CASE = model_inputs.pop('is_last' )
SCREAMING_SNAKE_CASE = self.model(**snake_case__ )
SCREAMING_SNAKE_CASE = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : str=0.1 , snake_case__ : Union[str, Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
SCREAMING_SNAKE_CASE = model_output['candidate_label']
SCREAMING_SNAKE_CASE = BaseModelOutput(snake_case__ )
SCREAMING_SNAKE_CASE = self.image_processor.post_process_object_detection(
outputs=snake_case__ , threshold=snake_case__ , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
SCREAMING_SNAKE_CASE = outputs['scores'][index].item()
SCREAMING_SNAKE_CASE = self._get_bounding_box(outputs['boxes'][index][0] )
SCREAMING_SNAKE_CASE = {'score': score, 'label': label, 'box': box}
results.append(snake_case__ )
        SCREAMING_SNAKE_CASE = sorted(snake_case__ , key=lambda x : x["score"] , reverse=snake_case__ )
if top_k:
SCREAMING_SNAKE_CASE = results[:top_k]
return results
def UpperCamelCase ( self : List[Any] , snake_case__ : "torch.Tensor" ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = box.int().tolist()
SCREAMING_SNAKE_CASE = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
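# Hedged usage sketch (assumption: an OWL-ViT checkpoint backs this pipeline):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote"],
#   )
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]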
| 673 | 1 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =CTRLTokenizer
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
SCREAMING_SNAKE_CASE = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
SCREAMING_SNAKE_CASE = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def UpperCamelCase ( self : List[str] , **snake_case__ : Any ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'adapt react readapt apt'
SCREAMING_SNAKE_CASE = 'adapt react readapt apt'
return input_text, output_text
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE = 'adapt react readapt apt'
SCREAMING_SNAKE_CASE = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
SCREAMING_SNAKE_CASE = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
| 673 |
def solution ( power : int = 10_00 ) -> int:
    """
    Return the sum of the decimal digits of 2**power (Project Euler problem 16).

    >>> solution(15)
    26
    >>> solution(1000)
    1366
    """
    num = 2**power
    list_num = list(str(num ) )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
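# Hedged equivalent one-liner: sum(int(digit) for digit in str(2**power)).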
if __name__ == "__main__":
a_ : List[str] = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
a_ : int = solution(power)
print("Sum of the digits is: ", result)
| 673 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any=0.9_99 , _UpperCamelCase : Optional[Any]="cosine" , ) -> Optional[Any]:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCamelCase : int ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCamelCase : str ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
SCREAMING_SNAKE_CASE = []
for i in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = i / num_diffusion_timesteps
SCREAMING_SNAKE_CASE = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_UpperCamelCase ) / alpha_bar_fn(_UpperCamelCase ) , _UpperCamelCase ) )
    return torch.tensor(_UpperCamelCase , dtype=torch.float32 )
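# Hedged note on the schedule above: with the "cosine" transform this follows
# Nichol & Dhariwal's improved-DDPM schedule, alpha_bar(t) = cos((t + 0.008) /
# 1.008 * pi / 2) ** 2, with each beta_i = min(1 - alpha_bar(t_{i+1}) /
# alpha_bar(t_i), max_beta).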
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCamelCase =[e.name for e in KarrasDiffusionSchedulers]
__UpperCamelCase =2
@register_to_config
def __init__( self : Union[str, Any] , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.00_085 , snake_case__ : float = 0.012 , snake_case__ : str = "linear" , snake_case__ : Optional[Union[np.ndarray, List[float]]] = None , snake_case__ : str = "epsilon" , snake_case__ : Optional[bool] = False , snake_case__ : Optional[bool] = False , snake_case__ : float = 1.0 , snake_case__ : str = "linspace" , snake_case__ : int = 0 , ):
"""simple docstring"""
if trained_betas is not None:
            SCREAMING_SNAKE_CASE = torch.tensor(snake_case__ , dtype=torch.float32 )
elif beta_schedule == "linear":
            SCREAMING_SNAKE_CASE = torch.linspace(snake_case__ , snake_case__ , snake_case__ , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case__ , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE = betas_for_alpha_bar(snake_case__ , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
SCREAMING_SNAKE_CASE = betas_for_alpha_bar(snake_case__ , alpha_transform_type='exp' )
else:
            raise NotImplementedError(F"""{beta_schedule} is not implemented for {self.__class__}""" )
SCREAMING_SNAKE_CASE = 1.0 - self.betas
SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = use_karras_sigmas
def UpperCamelCase ( self : str , snake_case__ : Dict , snake_case__ : List[Any]=None ):
"""simple docstring"""
if schedule_timesteps is None:
SCREAMING_SNAKE_CASE = self.timesteps
SCREAMING_SNAKE_CASE = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
SCREAMING_SNAKE_CASE = 1 if len(snake_case__ ) > 1 else 0
else:
SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(snake_case__ ) else timestep
SCREAMING_SNAKE_CASE = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase ( self : str ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : torch.FloatTensor , snake_case__ : Union[float, torch.FloatTensor] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.index_for_timestep(snake_case__ )
SCREAMING_SNAKE_CASE = self.sigmas[step_index]
SCREAMING_SNAKE_CASE = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Union[str, torch.device] = None , snake_case__ : Optional[int] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = num_inference_steps
SCREAMING_SNAKE_CASE = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
SCREAMING_SNAKE_CASE = np.linspace(0 , num_train_timesteps - 1 , snake_case__ , dtype=snake_case__ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
SCREAMING_SNAKE_CASE = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE = (np.arange(0 , snake_case__ ) * step_ratio).round()[::-1].copy().astype(snake_case__ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
SCREAMING_SNAKE_CASE = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE = (np.arange(snake_case__ , 0 , -step_ratio )).round().copy().astype(snake_case__ )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
SCREAMING_SNAKE_CASE = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
SCREAMING_SNAKE_CASE = np.log(snake_case__ )
SCREAMING_SNAKE_CASE = np.interp(snake_case__ , np.arange(0 , len(snake_case__ ) ) , snake_case__ )
if self.config.use_karras_sigmas:
SCREAMING_SNAKE_CASE = self._convert_to_karras(in_sigmas=snake_case__ , num_inference_steps=self.num_inference_steps )
SCREAMING_SNAKE_CASE = np.array([self._sigma_to_t(snake_case__ , snake_case__ ) for sigma in sigmas] )
        SCREAMING_SNAKE_CASE = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ ).to(device=snake_case__ )
SCREAMING_SNAKE_CASE = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ )
SCREAMING_SNAKE_CASE = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(snake_case__ ).startswith('mps' ):
# mps does not support float64
            SCREAMING_SNAKE_CASE = timesteps.to(snake_case__ , dtype=torch.float32 )
else:
SCREAMING_SNAKE_CASE = timesteps.to(device=snake_case__ )
# empty dt and derivative
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
SCREAMING_SNAKE_CASE = defaultdict(snake_case__ )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Any , snake_case__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = np.log(snake_case__ )
# get distribution
SCREAMING_SNAKE_CASE = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
SCREAMING_SNAKE_CASE = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
SCREAMING_SNAKE_CASE = low_idx + 1
SCREAMING_SNAKE_CASE = log_sigmas[low_idx]
SCREAMING_SNAKE_CASE = log_sigmas[high_idx]
# interpolate sigmas
SCREAMING_SNAKE_CASE = (low - log_sigma) / (low - high)
SCREAMING_SNAKE_CASE = np.clip(snake_case__ , 0 , 1 )
# transform interpolation to time range
SCREAMING_SNAKE_CASE = (1 - w) * low_idx + w * high_idx
SCREAMING_SNAKE_CASE = t.reshape(sigma.shape )
return t
def UpperCamelCase ( self : List[Any] , snake_case__ : torch.FloatTensor , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = in_sigmas[-1].item()
SCREAMING_SNAKE_CASE = in_sigmas[0].item()
SCREAMING_SNAKE_CASE = 7.0 # 7.0 is the value used in the paper
SCREAMING_SNAKE_CASE = np.linspace(0 , 1 , snake_case__ )
SCREAMING_SNAKE_CASE = sigma_min ** (1 / rho)
SCREAMING_SNAKE_CASE = sigma_max ** (1 / rho)
SCREAMING_SNAKE_CASE = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return self.dt is None
def UpperCamelCase ( self : List[Any] , snake_case__ : Union[torch.FloatTensor, np.ndarray] , snake_case__ : Union[float, torch.FloatTensor] , snake_case__ : Union[torch.FloatTensor, np.ndarray] , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.index_for_timestep(snake_case__ )
# advance index counter by 1
SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(snake_case__ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
SCREAMING_SNAKE_CASE = self.sigmas[step_index]
SCREAMING_SNAKE_CASE = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
SCREAMING_SNAKE_CASE = self.sigmas[step_index - 1]
SCREAMING_SNAKE_CASE = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
SCREAMING_SNAKE_CASE = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
SCREAMING_SNAKE_CASE = sigma_next - sigma_hat
# store for 2nd order step
SCREAMING_SNAKE_CASE = derivative
SCREAMING_SNAKE_CASE = dt
SCREAMING_SNAKE_CASE = sample
else:
# 2. 2nd order / Heun's method
SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_next
SCREAMING_SNAKE_CASE = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
SCREAMING_SNAKE_CASE = self.dt
SCREAMING_SNAKE_CASE = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case__ )
def UpperCamelCase ( self : Any , snake_case__ : torch.FloatTensor , snake_case__ : torch.FloatTensor , snake_case__ : torch.FloatTensor , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(snake_case__ ):
# mps does not support float64
            SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE = [self.index_for_timestep(snake_case__ , snake_case__ ) for t in timesteps]
SCREAMING_SNAKE_CASE = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
SCREAMING_SNAKE_CASE = sigma.unsqueeze(-1 )
SCREAMING_SNAKE_CASE = original_samples + noise * sigma
return noisy_samples
def __len__( self : int ):
"""simple docstring"""
return self.config.num_train_timesteps
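# Hedged usage sketch (this class corresponds to diffusers' HeunDiscreteScheduler;
# each logical timestep appears twice in `timesteps` for the two Heun substeps;
# `unet` and the initial `sample` are assumed to come from the surrounding pipeline):
#
#   from diffusers import HeunDiscreteScheduler
#   scheduler = HeunDiscreteScheduler()
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       sample = scheduler.step(unet(model_input, t).sample, t, sample).prev_sample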
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="facebook/bart-large-mnli"
__UpperCamelCase =(
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__UpperCamelCase ="text_classifier"
__UpperCamelCase =AutoTokenizer
__UpperCamelCase =AutoModelForSequenceClassification
__UpperCamelCase =["text", ["text"]]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().setup()
SCREAMING_SNAKE_CASE = self.model.config
SCREAMING_SNAKE_CASE = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
SCREAMING_SNAKE_CASE = int(snake_case__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(snake_case__ ) , [F"""This example is {label}""" for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
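# Hedged usage sketch (this corresponds to transformers' TextClassificationTool;
# the "text-classification" task id passed to load_tool is an assumption):
#
#   from transformers import load_tool
#   classifier = load_tool("text-classification")
#   classifier("This movie was a masterpiece.", labels=["positive", "negative"])  # -> "positive"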
| 673 | 1 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __lowerCAmelCase ( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : bool = False ) -> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(_UpperCamelCase ), magnitude * sin(_UpperCamelCase )]
return [magnitude * cos(radians(_UpperCamelCase ) ), magnitude * sin(radians(_UpperCamelCase ) )]
def __lowerCAmelCase ( _UpperCamelCase : NDArray[floataa] , _UpperCamelCase : NDArray[floataa] , _UpperCamelCase : float = 10**-1 ) -> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE = cross(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = sum(_UpperCamelCase )
return abs(_UpperCamelCase ) < eps
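# Hedged note: for 2D vectors numpy's cross() returns the scalar z-component of
# each moment, r_x * F_y - r_y * F_x; the system is in rotational equilibrium
# when those z-components sum to (approximately) zero within `eps`.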
if __name__ == "__main__":
# Test to check if it works
a_ : str = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
a_ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
a_ : Tuple = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
a_ : Optional[int] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
a_ : List[Any] = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]])
a_ : str = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 673 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a_ : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a_ : int = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
a_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ : List[Any] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a_ : Any = "allenai"
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict((re.sub(R'@@$' , '' , _UpperCamelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , _UpperCamelCase ), v) for k, v in d.items() )
SCREAMING_SNAKE_CASE = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
SCREAMING_SNAKE_CASE = d[k] # restore
return da
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
assert os.path.exists(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE = cls.hub_models()
SCREAMING_SNAKE_CASE = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
SCREAMING_SNAKE_CASE = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
SCREAMING_SNAKE_CASE = hub_utils.from_pretrained(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , archive_map=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vars(chkpt['args']['model'] )
SCREAMING_SNAKE_CASE = args['source_lang']
SCREAMING_SNAKE_CASE = args['target_lang']
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
# dicts
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{src_lang}.txt""" )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{tgt_lang}.txt""" )
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-src.json' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE = False
break
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-tgt.json' )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.exists(_UpperCamelCase ):
break
with open(_UpperCamelCase , encoding='utf-8' ) as fin:
SCREAMING_SNAKE_CASE = fin.read()
SCREAMING_SNAKE_CASE = re.sub(R' \d+$' , '' , _UpperCamelCase , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_UpperCamelCase )
# model config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
SCREAMING_SNAKE_CASE = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE = 5
SCREAMING_SNAKE_CASE = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE = best_score_hparams[model_dir]['length_penalty']
else:
SCREAMING_SNAKE_CASE = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# model
SCREAMING_SNAKE_CASE = chkpt['models'][0]
SCREAMING_SNAKE_CASE = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration(_UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
# save
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_UpperCamelCase , _UpperCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ : int = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
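# Hedged CLI sketch (paths are placeholders; the checkpoint directory must also
# contain the fairseq dicts and bpecodes, as asserted above):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path /data/wmt19-en-de/model4.pt \
#       --pytorch_dump_folder_path /data/converted/wmt19-en-de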
| 673 | 1 |