| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82–54.1k chars) | int64 (0–699) | string (111–35.6k chars) | int64 (0–699) | int64 (0–1) |
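Each row below pairs a `code` snippet (tagged with its `code_codestyle` id) with a `style_context` snippet (tagged with `style_context_codestyle`) and a binary `label`; the `| 49 |` and `| 49 | 1 |` lines between snippets are those per-row values. A minimal sketch of loading such a dump with the `datasets` library — the dataset path here is a placeholder, not the actual repository:

```python
from datasets import load_dataset

# Placeholder dataset id; substitute the real repository this dump came from.
ds = load_dataset("someuser/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]), row["code_codestyle"], row["style_context_codestyle"], row["label"])
```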
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@require_torch
def a ( self : Dict ):
__UpperCAmelCase = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
__UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__UpperCAmelCase = image_classifier(_lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(_lowercase ) , [
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
[{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
] , )
__UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
],
] , )
@require_tf
def a ( self : Optional[Any] ):
__UpperCAmelCase = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
__UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__UpperCAmelCase = image_classifier(_lowercase , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(_lowercase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
__UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
],
[
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
{'''score''': 0.333, '''label''': ANY(_lowercase )},
],
] , )
@slow
@require_torch
def a ( self : List[str] ):
__UpperCAmelCase = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
__UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__UpperCAmelCase = image_classifier(_lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(_lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
__UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def a ( self : Dict ):
__UpperCAmelCase = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
__UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__UpperCAmelCase = image_classifier(_lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(_lowercase ) , [
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
] , )
__UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{'''score''': 0.511, '''label''': '''remote'''},
{'''score''': 0.485, '''label''': '''cat'''},
{'''score''': 0.004, '''label''': '''plane'''},
],
]
* 5 , )
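For reference, the pipeline API these tests exercise can be sketched as follows (checkpoint and fixture path are taken from the test itself; this is illustrative usage, not part of the test file):

```python
from PIL import Image
from transformers import pipeline

# Tiny test checkpoint used above; swap in "openai/clip-vit-base-patch32" for real use.
image_classifier = pipeline(
    task="zero-shot-image-classification",
    model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")

# Returns one {"score": float, "label": str} dict per candidate label, sorted by score.
outputs = image_classifier(image, candidate_labels=["a", "b", "c"])
print(outputs)
```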
| 49 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Any = StableUnCLIPPipeline
a__ : Dict = TEXT_TO_IMAGE_PARAMS
a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ : Optional[int] = False
def a ( self : List[str] ):
embedder_hidden_size = 32
embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ):
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Any ):
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def a ( self : int ):
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ):
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.float16 )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = pipe('''anime turtle''' , generator=_lowercase , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
def a ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.float16 )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
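Condensed from the slow tests above, end-to-end usage of the pipeline looks roughly like this (checkpoint name and memory-saving calls come straight from the tests; fp16 loading assumes a CUDA device):

```python
import torch
from diffusers import StableUnCLIPPipeline

pipe = StableUnCLIPPipeline.from_pretrained(
    "fusing/stable-unclip-2-1-l", torch_dtype=torch.float16
)
# Memory savings used in the tests to avoid OOM on a V100;
# sequential CPU offload also handles device placement.
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()

generator = torch.Generator(device="cpu").manual_seed(0)
image = pipe("anime turtle", generator=generator, output_type="np").images[0]
print(image.shape)  # (768, 768, 3) per the test
```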
| 49 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : List[Any] = "naver-clova-ix/donut-base-finetuned-docvqa"
a__ : Dict = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
a__ : str = "document_qa"
a__ : int = AutoProcessor
a__ : Any = VisionEncoderDecoderModel
a__ : int = ["image", "text"]
a__ : Any = ["text"]
def __init__( self : List[Any] , *_lowercase : List[Any] , **_lowercase : List[Any] ):
if not is_vision_available():
raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
super().__init__(*_lowercase , **_lowercase )
def a ( self : str , _lowercase : "Image" , _lowercase : str ):
__UpperCAmelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
__UpperCAmelCase = task_prompt.replace('''{user_input}''' , _lowercase )
__UpperCAmelCase = self.pre_processor.tokenizer(
_lowercase , add_special_tokens=_lowercase , return_tensors='''pt''' ).input_ids
__UpperCAmelCase = self.pre_processor(_lowercase , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def a ( self : int , _lowercase : List[Any] ):
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_lowercase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_lowercase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_lowercase , ).sequences
def a ( self : int , _lowercase : Optional[Any] ):
__UpperCAmelCase = self.pre_processor.batch_decode(_lowercase )[0]
__UpperCAmelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
__UpperCAmelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
__UpperCAmelCase = re.sub(r'''<.*?>''' , '''''' , _lowercase , count=1 ).strip() # remove first task start token
__UpperCAmelCase = self.pre_processor.token2json(_lowercase )
return sequence["answer"]
| 49 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
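A quick sanity check with the classic healthy/sick HMM from the standard Viterbi example shows the expected call shape and output, assuming the de-obfuscated `viterbi` signature above:

```python
observations = ["normal", "cold", "dizzy"]
states = ["healthy", "sick"]
initial = {"healthy": 0.6, "sick": 0.4}
transition = {
    "healthy": {"healthy": 0.7, "sick": 0.3},
    "sick": {"healthy": 0.4, "sick": 0.6},
}
emission = {
    "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}

print(viterbi(observations, states, initial, transition, emission))
# ['healthy', 'healthy', 'sick']
```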
| 49 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Dict = logging.get_logger(__name__)
_lowercase : List[Any] = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : List[str] = "data2vec-text"
def __init__( self : Dict , _lowercase : Union[str, Any]=3_05_22 , _lowercase : Tuple=7_68 , _lowercase : Union[str, Any]=12 , _lowercase : Dict=12 , _lowercase : int=30_72 , _lowercase : Any="gelu" , _lowercase : List[str]=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : Dict=5_12 , _lowercase : Optional[Any]=2 , _lowercase : Optional[int]=0.02 , _lowercase : str=1E-12 , _lowercase : List[str]=1 , _lowercase : List[str]=0 , _lowercase : List[Any]=2 , _lowercase : List[str]="absolute" , _lowercase : str=True , _lowercase : Optional[int]=None , **_lowercase : int , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
__UpperCAmelCase = classifier_dropout
class _UpperCAmelCase ( _lowerCAmelCase ):
@property
def a ( self : Dict ):
if self.task == "multiple-choice":
__UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
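The configuration class above mirrors `transformers.Data2VecTextConfig`; instantiating it directly is straightforward:

```python
from transformers import Data2VecTextConfig

config = Data2VecTextConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
print(config.model_type)  # "data2vec-text"
```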
| 49 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_lowercase : int = logging.get_logger(__name__)
_lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowercase : str = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_lowercase : int = {
'yjernite/retribert-base-uncased': 5_12,
}
_lowercase : Any = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : str = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : str = PRETRAINED_INIT_CONFIGURATION
a__ : Optional[Any] = RetriBertTokenizer
a__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars
):
__UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = strip_accents
__UpperCAmelCase = tokenize_chinese_chars
__UpperCAmelCase = normalizer_class(**_lowercase )
__UpperCAmelCase = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
__UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
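In transformers the class above is `RetriBertTokenizerFast` (under the deprecated namespace in recent releases); a short usage sketch with the checkpoint from the pretrained maps in the snippet:

```python
from transformers import RetriBertTokenizerFast

tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
encoding = tokenizer("How are you?", "I am fine.")
print(encoding["input_ids"])
```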
| 49 | 1 |
"""simple docstring"""
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 49 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
_lowercase : Dict = 'bart'
_lowercase : Dict = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def lowercase__ ( snake_case_ :Any , snake_case_ :Dict="wiki40b" , snake_case_ :str="dense" , snake_case_ :Union[str, Any]=10 ):
if source == "none":
__UpperCAmelCase , __UpperCAmelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__UpperCAmelCase , __UpperCAmelCase = query_qa_dense_index(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
__UpperCAmelCase , __UpperCAmelCase = query_es_index(
snake_case_ , snake_case_ , index_name='''english_wiki40b_snippets_100w''' , n_results=snake_case_ , )
__UpperCAmelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__UpperCAmelCase = '''question: {} context: {}'''.format(snake_case_ , snake_case_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None),
} )
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_sas_generate(
            sas_tokenizer,
            sas_model,
            question_doc,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
_lowercase : Dict = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
_lowercase : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowercase : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
_lowercase : str = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
_lowercase : Optional[int] = st.sidebar.checkbox('Demo options')
if demo_options:
_lowercase : Tuple = st.sidebar.selectbox(
'',
action_list,
index=3,
)
_lowercase : List[str] = action_list.index(action_st)
_lowercase : str = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
_lowercase : int = show_type == 'Show full text of passages'
else:
_lowercase : str = 3
_lowercase : List[Any] = True
_lowercase : Optional[int] = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
_lowercase : Any = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
_lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
_lowercase : List[str] = 'wiki40b'
_lowercase : Optional[int] = 'dense'
_lowercase : List[Any] = 'beam'
_lowercase : str = 2
_lowercase : Optional[int] = 64
_lowercase : Union[str, Any] = 2_56
_lowercase : List[str] = None
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = st.sidebar.checkbox('Generation options')
if generate_options:
_lowercase : Tuple = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
_lowercase : Optional[int] = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
_lowercase : Optional[Any] = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
_lowercase : str = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowercase : List[Any] = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowercase : Dict = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowercase : Union[str, Any] = None
# start main text
_lowercase : Optional[int] = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
_lowercase : Optional[int] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_lowercase : Optional[Any] = st.text_input('Enter your question here:', '')
else:
_lowercase : int = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowercase ,_lowercase : Any = make_support(question, source=wiki_source, method='dense', n_results=10)
_lowercase ,_lowercase : Union[str, Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
_lowercase : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowercase : Any = support_list[:10]
_lowercase : Tuple = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
_lowercase ,_lowercase : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_lowercase ,_lowercase : Union[str, Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
_lowercase : int = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
_lowercase : Any = res[1].strip()
if sec_titles == "":
_lowercase : Dict = '[{}]({})'.format(res[0], wiki_url)
else:
_lowercase : List[Any] = sec_titles.split(' & ')
_lowercase : int = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
_lowercase : List[Any] = find_nearest_training(question)
_lowercase : Tuple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
_lowercase : int = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
_lowercase : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
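Stripped of the Streamlit widgets, the flow the "Show me!" button drives is: build a support document with `make_support`, then condition the seq2seq model on it via `answer_question`. A sketch using the function names recovered above (the app itself would be launched with `streamlit run <script>.py`):

```python
question = "How do people make chocolate?"
question_doc, support_list = make_support(question, source="wiki40b", method="dense", n_results=10)
answer, _ = answer_question(
    question_doc,
    sas_model,
    sas_tokenizer,
    min_len=64,
    max_len=256,
    sampling=False,
    n_beams=2,
    top_p=0.95,
    temp=0.7,
)
print(answer)
```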
| 49 | 1 |
"""simple docstring"""
from __future__ import annotations
_lowercase : Any = 10
def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
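A quick usage check for the sort as recovered above:

```python
print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))
# [2, 24, 45, 66, 75, 90, 170, 802]
```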
| 49 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[str] = CycleDiffusionPipeline
a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
a__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a ( self : Optional[int] ):
torch.manual_seed(0 )
__UpperCAmelCase = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCAmelCase = CLIPTextModel(_lowercase )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ):
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
__UpperCAmelCase = image / 2 + 0.5
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Optional[int] ):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowercase , '''half''' ):
__UpperCAmelCase = module.half()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a ( self : Tuple ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def a ( self : List[str] ):
return super().test_inference_batch_single_identical()
@skip_mps
def a ( self : int ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self : int ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(
_lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.float16 , revision='''fp16''' )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def a ( self : Optional[Any] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 49 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ['PoolFormerFeatureExtractor']
_lowercase : Any = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
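The `_LazyModule` registration above means importing the package is cheap; the heavy torch/vision modules are only imported when an attribute is first accessed, and the TYPE_CHECKING branch keeps static analyzers happy. Illustratively:

```python
import transformers.models.poolformer as poolformer

# Triggers the lazy import of the configuration module on first attribute access.
print(poolformer.PoolFormerConfig)
```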
| 49 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'}
_lowercase : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
_lowercase : List[str] = {
'google/rembert': 2_56,
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Union[str, Any] = VOCAB_FILES_NAMES
a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Tuple=True , _lowercase : str=True , _lowercase : str="[CLS]" , _lowercase : Dict="[SEP]" , _lowercase : Union[str, Any]="[UNK]" , _lowercase : Any="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : Tuple="[CLS]" , _lowercase : Optional[Any]="[MASK]" , **_lowercase : str , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = remove_space
__UpperCAmelCase = keep_accents
__UpperCAmelCase = vocab_file
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def a ( self : int ):
return len(self.sp_model )
def a ( self : Tuple ):
__UpperCAmelCase = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
__UpperCAmelCase = self.__dict__.copy()
__UpperCAmelCase = None
return state
def __setstate__( self : Tuple , _lowercase : str ):
__UpperCAmelCase = d
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any]=False ):
__UpperCAmelCase = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def a ( self : int , _lowercase : List[str] ):
return self.sp_model.PieceToId(_lowercase )
def a ( self : List[str] , _lowercase : str ):
return self.sp_model.IdToPiece(_lowercase )
def a ( self : Any , _lowercase : Dict ):
__UpperCAmelCase = self.sp_model.decode_pieces(_lowercase )
return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowercase ) )
return
__UpperCAmelCase = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
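In transformers the class above is `RemBertTokenizer`; loading it from the checkpoint in the pretrained map looks like:

```python
from transformers import RemBertTokenizer

tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
ids = tokenizer("Hello world")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))
```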
| 49 | 1 |
"""simple docstring"""
def solution(limit: int = 50_000_000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 49 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :str , snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Tuple ):
# Load configuration defined in the metadata file
with open(snake_case_ ) as metadata_file:
__UpperCAmelCase = json.load(snake_case_ )
__UpperCAmelCase = LukeConfig(use_entity_aware_attention=snake_case_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
__UpperCAmelCase = torch.load(snake_case_ , map_location='''cpu''' )
# Load the entity vocab file
__UpperCAmelCase = load_entity_vocab(snake_case_ )
__UpperCAmelCase = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
__UpperCAmelCase = AddedToken('''<ent>''' , lstrip=snake_case_ , rstrip=snake_case_ )
__UpperCAmelCase = AddedToken('''<ent2>''' , lstrip=snake_case_ , rstrip=snake_case_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(snake_case_ )
with open(os.path.join(snake_case_ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
__UpperCAmelCase = LukeTokenizer.from_pretrained(snake_case_ )
# Initialize the embeddings of the special tokens
__UpperCAmelCase = state_dict['''embeddings.word_embeddings.weight''']
__UpperCAmelCase = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
__UpperCAmelCase = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
__UpperCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__UpperCAmelCase = F'''encoder.layer.{layer_index}.attention.self.'''
__UpperCAmelCase = state_dict[prefix + matrix_name]
__UpperCAmelCase = state_dict[prefix + matrix_name]
__UpperCAmelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__UpperCAmelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
__UpperCAmelCase = entity_emb[entity_vocab['''[MASK]''']]
__UpperCAmelCase = LukeModel(config=snake_case_ ).eval()
__UpperCAmelCase , __UpperCAmelCase = model.load_state_dict(snake_case_ , strict=snake_case_ )
if not (len(snake_case_ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'''Missing keys {", ".join(snake_case_ )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' )
# Check outputs
__UpperCAmelCase = LukeTokenizer.from_pretrained(snake_case_ , task='''entity_classification''' )
__UpperCAmelCase = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
__UpperCAmelCase = (39, 42)
__UpperCAmelCase = tokenizer(snake_case_ , entity_spans=[span] , add_prefix_space=snake_case_ , return_tensors='''pt''' )
__UpperCAmelCase = model(**snake_case_ )
# Verify word hidden states
if model_size == "large":
__UpperCAmelCase = torch.Size((1, 42, 1_024) )
__UpperCAmelCase = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
__UpperCAmelCase = torch.Size((1, 42, 768) )
__UpperCAmelCase = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
__UpperCAmelCase = torch.Size((1, 1, 1_024) )
__UpperCAmelCase = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
__UpperCAmelCase = torch.Size((1, 1, 768) )
__UpperCAmelCase = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , snake_case_ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(snake_case_ ) )
model.save_pretrained(snake_case_ )
def lowercase__ ( snake_case_ :Optional[Any] ):
__UpperCAmelCase = {}
with open(snake_case_ , '''r''' , encoding='''utf-8''' ) as f:
for index, line in enumerate(snake_case_ ):
__UpperCAmelCase , __UpperCAmelCase = line.rstrip().split('''\t''' )
__UpperCAmelCase = index
return entity_vocab
if __name__ == "__main__":
_lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_lowercase : Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
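# Hedged usage sketch for the converter above; the file names are
# illustrative placeholders, not paths shipped with any particular LUKE
# checkpoint release:
#
#   python convert_luke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path luke_base/pytorch_model.bin \
#       --metadata_path luke_base/metadata.json \
#       --entity_vocab_path luke_base/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./converted-luke-base \
#       --model_size base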
| 49 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : List[Any] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowercase__ ( snake_case_ :Union[str, Any] ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowercase__ ( snake_case_ :int , snake_case_ :Dict ):
if args.student_type == "roberta":
__UpperCAmelCase = False
elif args.student_type == "gpt2":
__UpperCAmelCase = False
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Union[str, Any] ):
if args.student_type == "roberta":
__UpperCAmelCase = False
def lowercase__ ( ):
__UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature applied to the softmax during distillation.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' )
__UpperCAmelCase = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to'''
                    ''' overwrite it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ )
__UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__UpperCAmelCase = special_tok_ids
__UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
__UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__UpperCAmelCase = 0.0 # do not predict special tokens
__UpperCAmelCase = torch.from_numpy(snake_case_ )
else:
__UpperCAmelCase = None
__UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
__UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
__UpperCAmelCase = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__UpperCAmelCase = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
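# Hedged example invocation of this training script; the paths are
# illustrative, and the data_file/token_counts pickles must be produced by
# the repo's binarization scripts beforehand. Note that with --mlm the sanity
# checks above require alpha_mlm > 0 and alpha_clm == 0:
#
#   python train.py \
#       --student_type distilbert --student_config training_configs/distilbert.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 \
#       --data_file data/binarized_text.pickle \
#       --token_counts data/token_counts.pickle \
#       --dump_path serialization_dir/distilbert_run --force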
| 49 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class _UpperCAmelCase :
def __init__( self : Optional[int] , _lowercase : int , _lowercase : MutableSequence[float] ):
if len(_lowercase ) != degree + 1:
raise ValueError(
'''The number of coefficients should be equal to the degree + 1.''' )
__UpperCAmelCase = list(_lowercase )
__UpperCAmelCase = degree
def __add__( self : Union[str, Any] , _lowercase : Polynomial ):
if self.degree > polynomial_a.degree:
__UpperCAmelCase = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , _lowercase )
else:
__UpperCAmelCase = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , _lowercase )
def __sub__( self : Optional[Any] , _lowercase : Polynomial ):
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : int ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : int , _lowercase : Polynomial ):
__UpperCAmelCase = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , _lowercase )
def a ( self : List[Any] , _lowercase : int | float ):
__UpperCAmelCase = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : Tuple ):
__UpperCAmelCase = ''''''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_lowercase )
return polynomial
def __repr__( self : Optional[int] ):
return self.__str__()
def a ( self : Tuple ):
__UpperCAmelCase = [0] * self.degree
for i in range(self.degree ):
__UpperCAmelCase = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , _lowercase )
def a ( self : Any , _lowercase : int | float = 0 ):
__UpperCAmelCase = [0] * (self.degree + 2)
__UpperCAmelCase = constant
for i in range(self.degree + 1 ):
__UpperCAmelCase = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , _lowercase )
def __eq__( self : Optional[int] , _lowercase : object ):
if not isinstance(_lowercase , _lowercase ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : Any , _lowercase : object ):
return not self.__eq__(_lowercase )
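# Standalone sketch of the arithmetic the Polynomial class above implements,
# using clean helper names that are independent of the obfuscated method
# names in this file. Coefficients are stored lowest degree first, so
# [1, 2, 3] represents 3x^2 + 2x + 1.
def poly_mul(p: list, q: list) -> list:
    out = [0] * (len(p) + len(q) - 1)
    for i, a in enumerate(p):
        for j, b in enumerate(q):
            out[i + j] += a * b
    return out

def poly_eval(p: list, x: float) -> float:
    return sum(c * x**i for i, c in enumerate(p))

assert poly_mul([1, 2, 3], [0, 1]) == [0, 1, 2, 3]  # x * (3x^2 + 2x + 1)
assert poly_eval([1, 2, 3], 2) == 17                # 3*4 + 2*2 + 1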
| 49 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 1 |
"""simple docstring"""
import numpy
class _UpperCAmelCase :
def __init__( self : Dict , _lowercase : numpy.ndarray , _lowercase : numpy.ndarray ):
__UpperCAmelCase = input_array
        # Random initial weights are assigned, where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is the number of nodes in the input layer.
        # The first hidden layer consists of 4 nodes.
__UpperCAmelCase = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
__UpperCAmelCase = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
__UpperCAmelCase = numpy.random.rand(3 , 1 )
# Real output values provided.
__UpperCAmelCase = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
__UpperCAmelCase = numpy.zeros(output_array.shape )
def a ( self : str ):
__UpperCAmelCase = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
__UpperCAmelCase = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
__UpperCAmelCase = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def a ( self : str ):
__UpperCAmelCase = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
__UpperCAmelCase = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
__UpperCAmelCase = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def a ( self : Optional[Any] , _lowercase : numpy.ndarray , _lowercase : int , _lowercase : bool ):
for iteration in range(1 , iterations + 1 ):
__UpperCAmelCase = self.feedforward()
self.back_propagation()
if give_loss:
__UpperCAmelCase = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F'''Iteration {iteration} Loss: {loss}''' )
def a ( self : Optional[Any] , _lowercase : numpy.ndarray ):
__UpperCAmelCase = input_arr
__UpperCAmelCase = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
__UpperCAmelCase = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
__UpperCAmelCase = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowercase__ ( snake_case_ :numpy.ndarray ):
return 1 / (1 + numpy.exp(-value ))
def lowercase__ ( snake_case_ :numpy.ndarray ):
return (value) * (1 - (value))
def lowercase__ ( ):
__UpperCAmelCase = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
__UpperCAmelCase = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
__UpperCAmelCase = TwoHiddenLayerNeuralNetwork(
input_array=snake_case_ , output_array=snake_case_ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=snake_case_ , iterations=10 , give_loss=snake_case_ )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
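# Quick numeric sanity check of the sigmoid pair above (a sketch, not part of
# the original file). sigmoid_derivative expects the *already activated*
# value s and returns s * (1 - s), which is how back_propagation() calls it;
# the formula is verified here against a central finite difference.
def _check_sigmoid_derivative() -> None:
    def sig(x: float) -> float:
        return 1 / (1 + numpy.exp(-x))

    x, h = 0.5, 1e-6
    numeric = (sig(x + h) - sig(x - h)) / (2 * h)
    analytic = sig(x) * (1 - sig(x))
    assert abs(numeric - analytic) < 1e-9

_check_sigmoid_derivative()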
| 49 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowercase : Union[str, Any] = logging.getLogger(__name__)
_lowercase : Optional[Any] = 'Hello world! cécé herlolip'
_lowercase : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowercase__ ( snake_case_ :Any , snake_case_ :int ):
__UpperCAmelCase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , )
    __UpperCAmelCase = torch.load(snake_case_ , lambda storage , loc : storage )
__UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ )
original.eval()
__UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
__UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__UpperCAmelCase = encoder_input_ids
__UpperCAmelCase = decoder_input_ids
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = original.generator(snake_case_ )
__UpperCAmelCase = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = new_model.generator(snake_case_ )
__UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between model outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowercase : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
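# Hedged example invocation; the checkpoint file name is a placeholder for
# whatever the original BertAbs release calls its PyTorch dump:
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path bertabs_cnndm.pt \
#       --pytorch_dump_folder_path ./bertabs-converted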
| 49 | 1 |
"""simple docstring"""
def lowercase__ ( snake_case_ :List[str]=28_123 ):
__UpperCAmelCase = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
__UpperCAmelCase = set()
__UpperCAmelCase = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(snake_case_ )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution())
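# Hand check of the divisor-sum sieve above (an illustrative sketch, not part
# of the original solution): the proper divisors of 12 are 1, 2, 3, 4, 6,
# summing to 16 > 12, so 12 is the smallest abundant number and
# 24 = 12 + 12 is the smallest sum of two abundant numbers.
def proper_divisor_sum(n: int) -> int:
    return sum(d for d in range(1, n) if n % d == 0)

assert proper_divisor_sum(12) == 16
assert all(proper_divisor_sum(n) <= n for n in range(1, 12))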
| 49 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def a ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a ( self : Dict ):
__UpperCAmelCase = ort.SessionOptions()
__UpperCAmelCase = False
return options
def a ( self : Any ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self : Optional[int] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
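# Note (hedged): these tests are gated by @nightly and @require_torch_gpu, so
# running them locally typically needs onnxruntime-gpu plus a CUDA device and
# the nightly flag enabled, e.g. RUN_NIGHTLY=1 python -m pytest <this file>.
# The exact environment variable is an assumption based on the usual
# diffusers/transformers testing setup.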
| 49 | 1 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
_lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
        'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
_lowercase : Any = parser.parse_args()
_lowercase : int = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
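# Hedged example invocation for a Stable Diffusion v1.x checkpoint; the file
# names are placeholders, and the YAML is the v1-inference config referenced
# in the comment near the top of the argument list:
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path sd-v1-4.ckpt \
#       --original_config_file v1-inference.yaml \
#       --scheduler_type pndm \
#       --dump_path ./sd-v1-4-diffusers \
#       --half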
| 49 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase__ ( snake_case_ :Dict , snake_case_ :int ):
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
if issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = jsonl_path
elif issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ):
assert isinstance(snake_case_ , snake_case_ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ):
if split:
__UpperCAmelCase = {split: jsonl_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowercase__ ( snake_case_ :Optional[int] ):
return json.load(snake_case_ )
def lowercase__ ( snake_case_ :Any ):
return [json.loads(snake_case_ ) for line in buffer]
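# Tiny illustration of the two loader helpers above (a sketch): JSON Lines
# stores one object per line, while plain JSON is a single top-level value.
def _demo_loaders():
    import io

    jsonl_buffer = io.BytesIO(b'{"a": 1}\n{"a": 2}\n')
    assert [json.loads(line) for line in jsonl_buffer] == [{"a": 1}, {"a": 2}]
    json_buffer = io.BytesIO(b'[{"a": 1}, {"a": 2}]')
    assert json.load(json_buffer) == [{"a": 1}, {"a": 2}]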
class _UpperCAmelCase :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
def a ( self : int , _lowercase : Any ):
with pytest.raises(_lowercase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
__UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
assert exported_content == original_content
| 49 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _UpperCAmelCase ( _lowerCAmelCase ):
def a ( self : List[str] ):
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = 8
# DPR tok
__UpperCAmelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__UpperCAmelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(_lowercase , exist_ok=_lowercase )
__UpperCAmelCase = os.path.join(_lowercase , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
__UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__UpperCAmelCase = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
__UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__UpperCAmelCase = {'''unk_token''': '''<unk>'''}
__UpperCAmelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(_lowercase , exist_ok=_lowercase )
__UpperCAmelCase = os.path.join(_lowercase , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase = os.path.join(_lowercase , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_lowercase ) )
def a ( self : str ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def a ( self : Tuple ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def a ( self : int ):
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def a ( self : int ):
__UpperCAmelCase = os.path.join(self.tmpdirname , '''rag_tokenizer''' )
__UpperCAmelCase = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
__UpperCAmelCase = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(_lowercase )
rag_tokenizer.save_pretrained(_lowercase )
__UpperCAmelCase = RagTokenizer.from_pretrained(_lowercase , config=_lowercase )
self.assertIsInstance(new_rag_tokenizer.question_encoder , _lowercase )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , _lowercase )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def a ( self : int ):
__UpperCAmelCase = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' )
__UpperCAmelCase = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
__UpperCAmelCase = tokenizer(_lowercase )
self.assertIsNotNone(_lowercase )
@slow
def a ( self : List[str] ):
__UpperCAmelCase = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' )
__UpperCAmelCase = [
'''who got the first nobel prize in physics''',
'''when is the next deadpool movie being released''',
'''which mode is used for short wave broadcast service''',
'''who is the owner of reading football club''',
'''when is the next scandal episode coming out''',
'''when is the last time the philadelphia won the superbowl''',
'''what is the most current adobe flash player version''',
'''how many episodes are there in dragon ball z''',
'''what is the first step in the evolution of the eye''',
'''where is gall bladder situated in human body''',
'''what is the main mineral in lithium batteries''',
'''who is the president of usa right now''',
'''where do the greasers live in the outsiders''',
'''panda is a national animal of which country''',
'''what is the name of manchester united stadium''',
]
__UpperCAmelCase = tokenizer(_lowercase )
self.assertIsNotNone(_lowercase )
| 49 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Union[str, Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
__UpperCAmelCase = TextIteratorStreamer(_lowercase )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowercase , _lowercase )
def a ( self : str ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
__UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Tuple ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase )
model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
__UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a ( self : Tuple ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowercase ):
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
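# Hedged usage sketch (editor's addition): the real-world pattern these tests
# exercise -- `generate` runs in a background thread while the streamer yields
# decoded text chunks as soon as they are produced:
#
#     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#     thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer})
#     thread.start()
#     generated_text = "".join(chunk for chunk in streamer)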
| 49 | 1 |
"""simple docstring"""
# Imports
import numpy as np
class _UpperCAmelCase :
def __init__( self : Tuple , _lowercase : int=None , _lowercase : List[str]=None , _lowercase : Optional[Any]=None , _lowercase : List[Any]=None , _lowercase : int=None ):
self.set_matricies(red=_lowercase , green=_lowercase , blue=_lowercase , red_edge=_lowercase , nir=_lowercase )
def a ( self : Any , _lowercase : Optional[int]=None , _lowercase : Any=None , _lowercase : Dict=None , _lowercase : Any=None , _lowercase : int=None ):
if red is not None:
__UpperCAmelCase = red
if green is not None:
__UpperCAmelCase = green
if blue is not None:
__UpperCAmelCase = blue
if red_edge is not None:
__UpperCAmelCase = red_edge
if nir is not None:
__UpperCAmelCase = nir
return True
def a ( self : Union[str, Any] , _lowercase : List[str]="" , _lowercase : Dict=None , _lowercase : int=None , _lowercase : Optional[Any]=None , _lowercase : Optional[int]=None , _lowercase : Optional[Any]=None ):
self.set_matricies(red=_lowercase , green=_lowercase , blue=_lowercase , red_edge=_lowercase , nir=_lowercase )
__UpperCAmelCase = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def a ( self : Any ):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def a ( self : List[str] ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def a ( self : List[Any] ):
return self.nir * (self.red / (self.green**2))
def a ( self : Optional[Any] ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def a ( self : Union[str, Any] ):
return (self.nir - self.red) / (self.nir + self.red)
def a ( self : Optional[int] ):
return (self.nir - self.blue) / (self.nir + self.blue)
def a ( self : Dict ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def a ( self : Union[str, Any] ):
return (self.nir - self.green) / (self.nir + self.green)
def a ( self : List[str] ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def a ( self : Tuple ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def a ( self : int ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def a ( self : Optional[Any] ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def a ( self : int , _lowercase : Tuple=0.08 , _lowercase : str=1.22 , _lowercase : Dict=0.03 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def a ( self : List[str] ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def a ( self : List[str] ):
return (self.nir / self.green) - 1
def a ( self : Optional[Any] ):
return (self.nir / self.redEdge) - 1
def a ( self : Dict ):
return (self.red - self.blue) / self.red
def a ( self : Dict ):
__UpperCAmelCase = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def a ( self : Tuple ):
return self.nir - self.green
def a ( self : Union[str, Any] ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def a ( self : Tuple ):
__UpperCAmelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def a ( self : Optional[Any] , _lowercase : List[str]=0.16 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def a ( self : int , _lowercase : Union[str, Any]=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def a ( self : List[str] ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def a ( self : List[str] , _lowercase : Tuple=None , _lowercase : Any=None ):
return (self.nir - b) / (a * self.red)
def a ( self : Tuple ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def a ( self : str ):
return (self.red + self.green + self.blue) / 30.5
def a ( self : Optional[Any] ):
return self.nir / self.red
def a ( self : Optional[Any] ):
return (self.rvi() - 1) / (self.rvi() + 1)
def a ( self : Any ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def a ( self : Union[str, Any] ):
return self.green / (self.nir + self.red + self.green)
def a ( self : List[Any] ):
return self.nir / (self.nir + self.red + self.green)
def a ( self : List[Any] ):
return self.red / (self.nir + self.red + self.green)
def a ( self : List[str] ):
return (self.green - self.red) / (self.green + self.red)
def a ( self : Any ):
return (self.red - self.green) / (self.red + self.green)
def a ( self : Any ):
__UpperCAmelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
__UpperCAmelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def a ( self : int ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def a ( self : Tuple ):
return self.nir / self.red
def a ( self : List[Any] ):
return (self.ndvi() + 0.5) ** (1 / 2)
def a ( self : Tuple ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
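# Hedged sketch (editor's addition): the obfuscation above collapses every
# method name to `a`, but the NDVI branch it dispatches to is simply
# (nir - red) / (nir + red), computed elementwise on numpy bands:
#
#     import numpy as np
#     red = np.array([0.1, 0.2, 0.3])
#     nir = np.array([0.5, 0.6, 0.7])
#     (nir - red) / (nir + red)   # -> array([0.666..., 0.5, 0.4])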
| 49 |
"""simple docstring"""
def lowercase__ ( snake_case_ :float , snake_case_ :float ):
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
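# Hedged example (editor's addition): for water (density ~998 kg/m^3, bulk
# modulus ~2.15e9 Pa, both approximate) the formula gives
# (2.15e9 / 998) ** 0.5 ~ 1468 m/s, close to the measured speed of sound.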
| 49 | 1 |
"""simple docstring"""
def lowercase__ ( snake_case_ :str ):
__UpperCAmelCase = hex_num.strip()
if not hex_num:
raise ValueError('''No value was passed to the function''' )
__UpperCAmelCase = hex_num[0] == '''-'''
if is_negative:
__UpperCAmelCase = hex_num[1:]
try:
__UpperCAmelCase = int(snake_case_ , 16 )
except ValueError:
raise ValueError('''Invalid value was passed to the function''' )
__UpperCAmelCase = ''''''
while int_num > 0:
__UpperCAmelCase = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('''-''' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
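# Hedged examples (editor's addition): the helper returns the binary digits as
# a (possibly negative) int, e.g. 0xAC == 172 == 0b10101100:
#
#     lowercase__("AC")    # -> 10101100
#     lowercase__("-1")    # -> -1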
| 49 |
"""simple docstring"""
def lowercase__ ( snake_case_ :dict ):
__UpperCAmelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__UpperCAmelCase = set()
return any(
node not in visited and depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
for node in graph )
def lowercase__ ( snake_case_ :dict , snake_case_ :int , snake_case_ :set , snake_case_ :set ):
visited.add(snake_case_ )
rec_stk.add(snake_case_ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(snake_case_ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
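# Hedged examples (editor's addition): with the arguments read as
# (graph, vertex, visited, rec_stk), a directed 3-cycle is reported while a
# DAG is not:
#
#     lowercase__({0: [1], 1: [2], 2: [0]})   # -> True  (0 -> 1 -> 2 -> 0)
#     lowercase__({0: [1], 1: [2], 2: []})    # -> False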
| 49 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
_lowercase : Optional[int] = TypeVar('KT')
_lowercase : Optional[int] = TypeVar('VT')
class _UpperCAmelCase ( Generic[KT, VT] ):
def __init__( self : List[Any] , _lowercase : KT | str = "root" , _lowercase : VT | None = None ):
__UpperCAmelCase = key
__UpperCAmelCase = value
__UpperCAmelCase = []
def __repr__( self : str ):
return F'''Node({self.key}: {self.value})'''
@property
def a ( self : Union[str, Any] ):
return len(self.forward )
class _UpperCAmelCase ( Generic[KT, VT] ):
def __init__( self : int , _lowercase : float = 0.5 , _lowercase : int = 16 ):
__UpperCAmelCase = Node[KT, VT]()
__UpperCAmelCase = 0
__UpperCAmelCase = p
__UpperCAmelCase = max_level
def __str__( self : Dict ):
__UpperCAmelCase = list(self )
if len(_lowercase ) == 0:
return F'''SkipList(level={self.level})'''
__UpperCAmelCase = max((len(str(_lowercase ) ) for item in items) , default=4 )
__UpperCAmelCase = max(_lowercase , 4 ) + 4
__UpperCAmelCase = self.head
__UpperCAmelCase = []
__UpperCAmelCase = node.forward.copy()
lines.append(F'''[{node.key}]'''.ljust(_lowercase , '''-''' ) + '''* ''' * len(_lowercase ) )
lines.append(''' ''' * label_size + '''| ''' * len(_lowercase ) )
while len(node.forward ) != 0:
__UpperCAmelCase = node.forward[0]
lines.append(
F'''[{node.key}]'''.ljust(_lowercase , '''-''' )
+ ''' '''.join(str(n.key ) if n.key == node.key else '''|''' for n in forwards ) )
lines.append(''' ''' * label_size + '''| ''' * len(_lowercase ) )
__UpperCAmelCase = node.forward
lines.append('''None'''.ljust(_lowercase ) + '''* ''' * len(_lowercase ) )
return F'''SkipList(level={self.level})\n''' + "\n".join(_lowercase )
def __iter__( self : int ):
__UpperCAmelCase = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
__UpperCAmelCase = node.forward[0]
def a ( self : Optional[Any] ):
__UpperCAmelCase = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def a ( self : Optional[int] , _lowercase : Tuple ):
__UpperCAmelCase = []
__UpperCAmelCase = self.head
for i in reversed(range(self.level ) ):
            # i < node.level - only follow forward links that actually exist at this node.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
__UpperCAmelCase = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_lowercase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def a ( self : Optional[Any] , _lowercase : KT ):
__UpperCAmelCase , __UpperCAmelCase = self._locate_node(_lowercase )
if node is not None:
for i, update_node in enumerate(_lowercase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
__UpperCAmelCase = node.forward[i]
else:
__UpperCAmelCase = update_node.forward[:i]
def a ( self : Union[str, Any] , _lowercase : KT , _lowercase : VT ):
__UpperCAmelCase , __UpperCAmelCase = self._locate_node(_lowercase )
if node is not None:
__UpperCAmelCase = value
else:
__UpperCAmelCase = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , _lowercase ):
update_vector.append(self.head )
__UpperCAmelCase = level
__UpperCAmelCase = Node(_lowercase , _lowercase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(_lowercase )
else:
__UpperCAmelCase = new_node
def a ( self : List[Any] , _lowercase : VT ):
__UpperCAmelCase , __UpperCAmelCase = self._locate_node(_lowercase )
if node is not None:
return node.value
return None
def lowercase__ ( ):
__UpperCAmelCase = SkipList()
skip_list.insert('''Key1''' , 3 )
skip_list.insert('''Key2''' , 12 )
skip_list.insert('''Key3''' , 41 )
skip_list.insert('''Key4''' , -19 )
__UpperCAmelCase = skip_list.head
__UpperCAmelCase = {}
while node.level != 0:
__UpperCAmelCase = node.forward[0]
__UpperCAmelCase = node.value
assert len(snake_case_ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def lowercase__ ( ):
__UpperCAmelCase = SkipList()
skip_list.insert('''Key1''' , 10 )
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''Key5''' , 7 )
skip_list.insert('''Key7''' , 10 )
skip_list.insert('''Key10''' , 5 )
skip_list.insert('''Key7''' , 7 )
skip_list.insert('''Key5''' , 5 )
skip_list.insert('''Key10''' , 10 )
__UpperCAmelCase = skip_list.head
__UpperCAmelCase = {}
while node.level != 0:
__UpperCAmelCase = node.forward[0]
__UpperCAmelCase = node.value
if len(snake_case_ ) != 4:
print()
assert len(snake_case_ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def lowercase__ ( ):
__UpperCAmelCase = SkipList()
assert skip_list.find('''Some key''' ) is None
def lowercase__ ( ):
__UpperCAmelCase = SkipList()
skip_list.insert('''Key2''' , 20 )
assert skip_list.find('''Key2''' ) == 20
skip_list.insert('''Some Key''' , 10 )
skip_list.insert('''Key2''' , 8 )
skip_list.insert('''V''' , 13 )
assert skip_list.find('''Y''' ) is None
assert skip_list.find('''Key2''' ) == 8
assert skip_list.find('''Some Key''' ) == 10
assert skip_list.find('''V''' ) == 13
def lowercase__ ( ):
__UpperCAmelCase = SkipList()
skip_list.delete('''Some key''' )
assert len(skip_list.head.forward ) == 0
def lowercase__ ( ):
__UpperCAmelCase = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''Key2''' ) is None
def lowercase__ ( ):
__UpperCAmelCase = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) == 14
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def lowercase__ ( ):
__UpperCAmelCase = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 142 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''X''' )
def traverse_keys(snake_case_ :int ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(snake_case_ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def lowercase__ ( ):
def is_sorted(snake_case_ :Union[str, Any] ):
return all(next_item >= item for item, next_item in zip(snake_case_ , lst[1:] ) )
__UpperCAmelCase = SkipList()
for i in range(10 ):
skip_list.insert(snake_case_ , snake_case_ )
assert is_sorted(list(snake_case_ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(snake_case_ ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(snake_case_ ) )
def lowercase__ ( ):
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def lowercase__ ( ):
__UpperCAmelCase = SkipList()
skip_list.insert(2 , '''2''' )
skip_list.insert(4 , '''4''' )
skip_list.insert(6 , '''4''' )
skip_list.insert(4 , '''5''' )
skip_list.insert(8 , '''4''' )
skip_list.insert(9 , '''4''' )
skip_list.delete(4 )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
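# Hedged sketch (editor's addition): the geometric level distribution behind
# `random_level` -- with p = 0.5 roughly half the nodes reach each next level,
# which is what gives the expected O(log n) search cost:
def sample_level(p: float = 0.5, max_level: int = 16) -> int:
    level = 1
    while random() < p and level < max_level:  # reuses the module's `random`
        level += 1
    return level

# Over many samples, about 1/2 of the levels exceed 1, 1/4 exceed 2, and so on.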
| 49 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ['PoolFormerFeatureExtractor']
_lowercase : Any = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 49 | 1 |
"""simple docstring"""
def lowercase__ ( snake_case_ :str = "The quick brown fox jumps over the lazy dog" , ):
__UpperCAmelCase = set()
# Replace all the whitespace in our sentence
__UpperCAmelCase = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(snake_case_ ) == 26
def lowercase__ ( snake_case_ :str = "The quick brown fox jumps over the lazy dog" , ):
__UpperCAmelCase = [False] * 26
for char in input_str:
if char.islower():
__UpperCAmelCase = True
elif char.isupper():
__UpperCAmelCase = True
return all(snake_case_ )
def lowercase__ ( snake_case_ :str = "The quick brown fox jumps over the lazy dog" , ):
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def lowercase__ ( ):
from timeit import timeit
__UpperCAmelCase = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=snake_case_ ) )
print(timeit('''is_pangram_faster()''' , setup=snake_case_ ) )
print(timeit('''is_pangram_fastest()''' , setup=snake_case_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
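# Hedged examples (editor's addition): intended behaviour of all three
# checkers, using the names from the benchmark setup string above:
#
#     is_pangram()                        # -> True (default sentence)
#     is_pangram("My name is Unknown")    # -> False
#     is_pangram_fastest("abcdefghijklmnopqrstuvwxyz")   # -> True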
| 49 |
"""simple docstring"""
def lowercase__ ( snake_case_ :Dict ): # noqa: E741
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = 0
__UpperCAmelCase = [0] * n
__UpperCAmelCase = [False] * n
__UpperCAmelCase = [False] * n
def dfs(snake_case_ :Tuple , snake_case_ :Union[str, Any] , snake_case_ :Any , snake_case_ :int ):
if parent == root:
out_edge_count += 1
__UpperCAmelCase = True
__UpperCAmelCase = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
__UpperCAmelCase = dfs(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__UpperCAmelCase = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
__UpperCAmelCase = True
# AP found via cycle
if at == low[to]:
__UpperCAmelCase = True
else:
__UpperCAmelCase = min(low[at] , snake_case_ )
return out_edge_count
for i in range(snake_case_ ):
if not visited[i]:
__UpperCAmelCase = 0
__UpperCAmelCase = dfs(snake_case_ , snake_case_ , -1 , snake_case_ )
__UpperCAmelCase = out_edge_count > 1
for x in range(len(snake_case_ ) ):
if is_art[x] is True:
print(snake_case_ )
# Adjacency list of graph
_lowercase : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
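# Hedged note (editor's addition): for this adjacency list the articulation
# points are 2 (separates {0, 1} from the rest), 3 (removing it isolates 4)
# and 5 (separates {6, 7, 8}), so a working implementation prints 2, 3, 5.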
| 49 | 1 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _UpperCAmelCase ( _lowerCAmelCase ):
def __init__( self : Tuple , _lowercase : Distribution , _lowercase : List[str]=None , _lowercase : str=None , _lowercase : Tuple=0 ):
__UpperCAmelCase = 1.0 if scale is None else scale
__UpperCAmelCase = 0.0 if loc is None else loc
super().__init__(_lowercase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=_lowercase )] )
@property
def a ( self : Tuple ):
return self.base_dist.mean * self.scale + self.loc
@property
def a ( self : int ):
return self.base_dist.variance * self.scale**2
@property
def a ( self : Optional[int] ):
return self.variance.sqrt()
class _UpperCAmelCase ( nn.Module ):
def __init__( self : List[str] , _lowercase : int , _lowercase : Dict[str, int] , _lowercase : Callable[..., Tuple[torch.Tensor]] , **_lowercase : Tuple ):
super().__init__(**_lowercase )
__UpperCAmelCase = args_dim
__UpperCAmelCase = nn.ModuleList([nn.Linear(_lowercase , _lowercase ) for dim in args_dim.values()] )
__UpperCAmelCase = domain_map
def a ( self : int , _lowercase : torch.Tensor ):
__UpperCAmelCase = [proj(_lowercase ) for proj in self.proj]
return self.domain_map(*_lowercase )
class _UpperCAmelCase ( nn.Module ):
def __init__( self : Union[str, Any] , _lowercase : List[str] ):
super().__init__()
__UpperCAmelCase = function
def a ( self : Union[str, Any] , _lowercase : Tuple , *_lowercase : Optional[Any] ):
return self.function(_lowercase , *_lowercase )
class _UpperCAmelCase :
a__ : type
a__ : int
a__ : Dict[str, int]
def __init__( self : List[str] , _lowercase : int = 1 ):
__UpperCAmelCase = dim
__UpperCAmelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
def a ( self : List[str] , _lowercase : Optional[Any] ):
if self.dim == 1:
return self.distribution_class(*_lowercase )
else:
return Independent(self.distribution_class(*_lowercase ) , 1 )
def a ( self : Optional[int] , _lowercase : str , _lowercase : Optional[torch.Tensor] = None , _lowercase : Optional[torch.Tensor] = None , ):
__UpperCAmelCase = self._base_distribution(_lowercase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(_lowercase , loc=_lowercase , scale=_lowercase , event_dim=self.event_dim )
@property
def a ( self : List[str] ):
return () if self.dim == 1 else (self.dim,)
@property
def a ( self : List[str] ):
return len(self.event_shape )
@property
def a ( self : Dict ):
return 0.0
def a ( self : Dict , _lowercase : int ):
return ParameterProjection(
in_features=_lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def a ( self : List[str] , *_lowercase : torch.Tensor ):
raise NotImplementedError()
@staticmethod
def a ( _lowercase : torch.Tensor ):
return (x + torch.sqrt(torch.square(_lowercase ) + 4.0 )) / 2.0
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
a__ : type = StudentT
@classmethod
def a ( cls : Optional[Any] , _lowercase : torch.Tensor , _lowercase : torch.Tensor , _lowercase : torch.Tensor ):
__UpperCAmelCase = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
__UpperCAmelCase = 2.0 + cls.squareplus(_lowercase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict[str, int] = {"loc": 1, "scale": 1}
a__ : type = Normal
@classmethod
def a ( cls : Optional[int] , _lowercase : torch.Tensor , _lowercase : torch.Tensor ):
__UpperCAmelCase = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict[str, int] = {"total_count": 1, "logits": 1}
a__ : type = NegativeBinomial
@classmethod
def a ( cls : Tuple , _lowercase : torch.Tensor , _lowercase : torch.Tensor ):
__UpperCAmelCase = cls.squareplus(_lowercase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def a ( self : Optional[Any] , _lowercase : Any ):
__UpperCAmelCase , __UpperCAmelCase = distr_args
if self.dim == 1:
return self.distribution_class(total_count=_lowercase , logits=_lowercase )
else:
return Independent(self.distribution_class(total_count=_lowercase , logits=_lowercase ) , 1 )
def a ( self : Optional[int] , _lowercase : Union[str, Any] , _lowercase : Optional[torch.Tensor] = None , _lowercase : Optional[torch.Tensor] = None ):
__UpperCAmelCase , __UpperCAmelCase = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
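# Hedged sketch (editor's addition): `squareplus`, used above to keep scale,
# df and total_count positive, is (x + sqrt(x^2 + 4)) / 2 -- smooth, strictly
# positive, and approximately x for large x:
#
#     x = torch.tensor([-3.0, 0.0, 3.0])
#     (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0   # ~[0.303, 1.000, 3.303]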
| 49 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict = "EncodecFeatureExtractor"
a__ : Tuple = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : str ):
super().__init__(_lowercase , _lowercase )
__UpperCAmelCase = self.feature_extractor
__UpperCAmelCase = False
def a ( self : List[str] , _lowercase : List[Any]=None , _lowercase : List[str]=None , _lowercase : Any=True ):
return self.tokenizer.get_decoder_prompt_ids(task=_lowercase , language=_lowercase , no_timestamps=_lowercase )
def __call__( self : Any , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowercase , **_lowercase )
__UpperCAmelCase = kwargs.pop('''audio''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''sampling_rate''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''text''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
__UpperCAmelCase = self.tokenizer(_lowercase , **_lowercase )
if audio is not None:
__UpperCAmelCase = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__UpperCAmelCase = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
__UpperCAmelCase = audio_inputs['''padding_mask''']
return inputs
def a ( self : str , *_lowercase : Dict , **_lowercase : List[str] ):
__UpperCAmelCase = kwargs.pop('''audio''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''padding_mask''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if audio_values is not None:
return self._decode_audio(_lowercase , padding_mask=_lowercase )
else:
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def a ( self : Union[str, Any] , *_lowercase : int , **_lowercase : List[str] ):
return self.tokenizer.decode(*_lowercase , **_lowercase )
def a ( self : List[str] , _lowercase : List[Any] , _lowercase : Optional = None ):
__UpperCAmelCase = to_numpy(_lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = audio_values.shape
if padding_mask is None:
return list(_lowercase )
__UpperCAmelCase = to_numpy(_lowercase )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__UpperCAmelCase = seq_len - padding_mask.shape[-1]
__UpperCAmelCase = 1 - self.feature_extractor.padding_value
__UpperCAmelCase = np.pad(_lowercase , ((0, 0), (0, difference)) , '''constant''' , constant_values=_lowercase )
__UpperCAmelCase = audio_values.tolist()
for i in range(_lowercase ):
__UpperCAmelCase = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__UpperCAmelCase = sliced_audio.reshape(_lowercase , -1 )
return audio_values
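# Hedged sketch (editor's addition): the boolean slicing above, in isolation --
# positions where the mask equals the padding value are dropped, then the
# result is folded back to (channels, samples):
#
#     audio = np.array([[1.0, 2.0, 3.0, 0.0, 0.0]])   # (channels=1, seq_len=5)
#     mask = np.array([1, 1, 1, 0, 0])                # 0 == padding_value
#     audio[mask[None, :] != 0].reshape(1, -1)        # -> [[1., 2., 3.]]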
| 49 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : List[str] = logging.get_logger(__name__)
_lowercase : Any = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Union[str, Any] = "pegasus"
a__ : Optional[int] = ["past_key_values"]
a__ : str = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[int] , _lowercase : Tuple=5_02_65 , _lowercase : Dict=10_24 , _lowercase : int=12 , _lowercase : Any=40_96 , _lowercase : Tuple=16 , _lowercase : List[str]=12 , _lowercase : List[Any]=40_96 , _lowercase : List[str]=16 , _lowercase : Any=0.0 , _lowercase : int=0.0 , _lowercase : str=True , _lowercase : Union[str, Any]=True , _lowercase : Tuple="gelu" , _lowercase : Any=10_24 , _lowercase : Optional[int]=0.1 , _lowercase : Tuple=0.0 , _lowercase : Tuple=0.0 , _lowercase : Union[str, Any]=0.02 , _lowercase : Tuple=0 , _lowercase : str=False , _lowercase : Tuple=0 , _lowercase : Any=1 , _lowercase : List[Any]=1 , **_lowercase : Dict , ):
__UpperCAmelCase = vocab_size
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = d_model
__UpperCAmelCase = encoder_ffn_dim
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = encoder_attention_heads
__UpperCAmelCase = decoder_ffn_dim
__UpperCAmelCase = decoder_layers
__UpperCAmelCase = decoder_attention_heads
__UpperCAmelCase = dropout
__UpperCAmelCase = attention_dropout
__UpperCAmelCase = activation_dropout
__UpperCAmelCase = activation_function
__UpperCAmelCase = init_std
__UpperCAmelCase = encoder_layerdrop
__UpperCAmelCase = decoder_layerdrop
__UpperCAmelCase = use_cache
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , **_lowercase , )
@property
def a ( self : Any ):
return self.encoder_attention_heads
@property
def a ( self : List[Any] ):
return self.d_model
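# Hedged usage sketch (editor's addition): under its real name this is
# `transformers.PegasusConfig`; a typical instantiation overrides a few
# defaults, e.g.:
#
#     from transformers import PegasusConfig
#     config = PegasusConfig(d_model=512, encoder_layers=6, decoder_layers=6)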
| 49 |
"""simple docstring"""
def lowercase__ ( snake_case_ :str , snake_case_ :str ):
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
__UpperCAmelCase = True
for i in range(snake_case_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
__UpperCAmelCase = True
if a[i].islower():
__UpperCAmelCase = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
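# Hedged examples (editor's addition): this DP is the classic "abbreviation"
# check -- can `a` become `b` by upper-casing some lowercase letters and
# deleting the remaining lowercase ones?
#
#     lowercase__("daBcd", "ABC")   # -> True  (d(A)B(C)d -> "ABC")
#     lowercase__("dBcd", "ABC")    # -> False (no 'a' available before 'B')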
| 49 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[Any] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Tuple = "camembert"
def __init__( self : Union[str, Any] , _lowercase : Any=3_05_22 , _lowercase : Any=7_68 , _lowercase : Union[str, Any]=12 , _lowercase : List[str]=12 , _lowercase : int=30_72 , _lowercase : Union[str, Any]="gelu" , _lowercase : Dict=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : int=5_12 , _lowercase : Optional[Any]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[Any]=1E-12 , _lowercase : Optional[int]=1 , _lowercase : Optional[Any]=0 , _lowercase : Tuple=2 , _lowercase : List[Any]="absolute" , _lowercase : List[Any]=True , _lowercase : Dict=None , **_lowercase : Optional[int] , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
__UpperCAmelCase = classifier_dropout
class _UpperCAmelCase ( _lowerCAmelCase ):
@property
def a ( self : Tuple ):
if self.task == "multiple-choice":
__UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
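# Hedged note (editor's addition): marking axis 0 as "batch" and axis 1 as
# "sequence" (or "choice"/"sequence" for multiple choice) exports those
# dimensions as dynamic, so a single ONNX graph serves any batch size and length.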
| 49 |
"""simple docstring"""
from collections import deque
class _UpperCAmelCase :
def __init__( self : List[Any] , _lowercase : str , _lowercase : int , _lowercase : int ):
__UpperCAmelCase = process_name # process name
__UpperCAmelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__UpperCAmelCase = arrival_time
__UpperCAmelCase = burst_time # remaining burst time
        __UpperCAmelCase = 0 # total time the process has waited in the ready queue
__UpperCAmelCase = 0 # time from arrival time to completion time
class _UpperCAmelCase :
def __init__( self : List[str] , _lowercase : int , _lowercase : list[int] , _lowercase : deque[Process] , _lowercase : int , ):
# total number of mlfq's queues
__UpperCAmelCase = number_of_queues
# time slice of queues that round robin algorithm applied
__UpperCAmelCase = time_slices
# unfinished process is in this ready_queue
__UpperCAmelCase = queue
# current time
__UpperCAmelCase = current_time
# finished process is in this sequence queue
__UpperCAmelCase = deque()
def a ( self : Dict ):
__UpperCAmelCase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def a ( self : str , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def a ( self : Any , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def a ( self : Tuple , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def a ( self : Optional[int] , _lowercase : deque[Process] ):
return [q.burst_time for q in queue]
def a ( self : str , _lowercase : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def a ( self : Union[str, Any] , _lowercase : deque[Process] ):
__UpperCAmelCase = deque() # sequence deque of finished process
while len(_lowercase ) != 0:
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_lowercase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__UpperCAmelCase = 0
# set the process's turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# set the completion time
__UpperCAmelCase = self.current_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def a ( self : Union[str, Any] , _lowercase : deque[Process] , _lowercase : int ):
__UpperCAmelCase = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_lowercase ) ):
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_lowercase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__UpperCAmelCase = self.current_time
                # the process is not finished, so send it to the back of the ready queue
ready_queue.append(_lowercase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__UpperCAmelCase = 0
# set the finish time
__UpperCAmelCase = self.current_time
# update the process' turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def a ( self : Union[str, Any] ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
__UpperCAmelCase , __UpperCAmelCase = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
_lowercase : List[str] = Process('P1', 0, 53)
_lowercase : str = Process('P2', 0, 17)
_lowercase : Union[str, Any] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : Any = 3
_lowercase : Union[str, Any] = [17, 25]
_lowercase : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
_lowercase : Optional[Any] = Process('P1', 0, 53)
_lowercase : Tuple = Process('P2', 0, 17)
_lowercase : Optional[int] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : int = 3
_lowercase : int = [17, 25]
_lowercase : List[str] = deque([Pa, Pa, Pa, Pa])
_lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_lowercase : str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
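# Hedged trace (editor's addition): with time slices [17, 25], P2 (burst 17)
# finishes during the first round-robin pass, P4 during the second, and P1/P3
# drain in the final FCFS stage, giving the finish sequence P2, P4, P1, P3.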
| 49 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : int = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[Any] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Tuple = "camembert"
def __init__( self : Union[str, Any] , _lowercase : Any=3_05_22 , _lowercase : Any=7_68 , _lowercase : Union[str, Any]=12 , _lowercase : List[str]=12 , _lowercase : int=30_72 , _lowercase : Union[str, Any]="gelu" , _lowercase : Dict=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : int=5_12 , _lowercase : Optional[Any]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[Any]=1E-12 , _lowercase : Optional[int]=1 , _lowercase : Optional[Any]=0 , _lowercase : Tuple=2 , _lowercase : List[Any]="absolute" , _lowercase : List[Any]=True , _lowercase : Dict=None , **_lowercase : Optional[int] , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
__UpperCAmelCase = classifier_dropout
class _UpperCAmelCase ( _lowerCAmelCase ):
@property
def a ( self : Tuple ):
if self.task == "multiple-choice":
__UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49 | 1 |
"""simple docstring"""
def lowercase__ ( snake_case_ :int = 600_851_475_143 ):
try:
__UpperCAmelCase = int(snake_case_ )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
__UpperCAmelCase = 2
__UpperCAmelCase = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
__UpperCAmelCase = i
while n % i == 0:
__UpperCAmelCase = n // i
i += 1
return int(snake_case_ )
if __name__ == "__main__":
print(f"""{solution() = }""")
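# Hedged examples (editor's addition): the Project Euler statement's sample
# input is 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29; for
# the default input 600851475143 the expected answer is 6857.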
| 49 |
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( snake_case_ :list , snake_case_ :int ):
# Checks if the entire collection has been sorted
if len(snake_case_ ) <= 1 or n <= 1:
return
insert_next(snake_case_ , n - 1 )
rec_insertion_sort(snake_case_ , n - 1 )
def lowercase__ ( snake_case_ :list , snake_case_ :int ):
# Checks order between adjacent elements
if index >= len(snake_case_ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
__UpperCAmelCase , __UpperCAmelCase = (
collection[index],
collection[index - 1],
)
insert_next(snake_case_ , index + 1 )
if __name__ == "__main__":
_lowercase : Any = input('Enter integers separated by spaces: ')
_lowercase : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
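# Hedged sketch (editor's addition): the swap inside `insert_next`, with the
# obfuscated assignment targets made explicit:
#
#     collection[index - 1], collection[index] = (
#         collection[index],
#         collection[index - 1],
#     )
#
# e.g. [5, 2, 9, 1] becomes [1, 2, 5, 9] after rec_insertion_sort(lst, len(lst)).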
| 49 | 1 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
class _UpperCAmelCase :
a__ : str
a__ : str = None
@staticmethod
def a ( ):
raise NotImplementedError
def a ( self : List[str] , _lowercase : Dict , _lowercase : int , _lowercase : str , **_lowercase : Optional[int] ):
raise NotImplementedError
def a ( self : Tuple , _lowercase : List[str] ):
raise NotImplementedError
def a ( self : Any ):
if not self.is_available():
raise RuntimeError(
F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def a ( cls : Optional[int] ):
return F'''`pip install {cls.pip_package or cls.name}`'''
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Optional[Any] = "optuna"
@staticmethod
def a ( ):
return is_optuna_available()
def a ( self : List[Any] , _lowercase : Dict , _lowercase : int , _lowercase : str , **_lowercase : Any ):
return run_hp_search_optuna(_lowercase , _lowercase , _lowercase , **_lowercase )
def a ( self : Dict , _lowercase : str ):
return default_hp_space_optuna(_lowercase )
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Optional[int] = "ray"
a__ : Union[str, Any] = "'ray[tune]'"
@staticmethod
def a ( ):
return is_ray_available()
def a ( self : Any , _lowercase : int , _lowercase : int , _lowercase : str , **_lowercase : int ):
return run_hp_search_ray(_lowercase , _lowercase , _lowercase , **_lowercase )
def a ( self : Optional[int] , _lowercase : Optional[Any] ):
return default_hp_space_ray(_lowercase )
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : List[str] = "sigopt"
@staticmethod
def a ( ):
return is_sigopt_available()
def a ( self : Optional[int] , _lowercase : List[str] , _lowercase : int , _lowercase : str , **_lowercase : Optional[int] ):
return run_hp_search_sigopt(_lowercase , _lowercase , _lowercase , **_lowercase )
def a ( self : Dict , _lowercase : List[Any] ):
return default_hp_space_sigopt(_lowercase )
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Optional[int] = "wandb"
@staticmethod
def a ( ):
return is_wandb_available()
def a ( self : Optional[Any] , _lowercase : Any , _lowercase : int , _lowercase : str , **_lowercase : Optional[int] ):
return run_hp_search_wandb(_lowercase , _lowercase , _lowercase , **_lowercase )
def a ( self : Union[str, Any] , _lowercase : List[str] ):
return default_hp_space_wandb(_lowercase )
_lowercase : str = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowercase__ ( ):
__UpperCAmelCase = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(snake_case_ ) > 0:
__UpperCAmelCase = available_backends[0].name
if len(snake_case_ ) > 1:
logger.info(
F'''{len(snake_case_ )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
F''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
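# Hedged usage sketch (editor's addition): these backends are what
# `Trainer.hyperparameter_search` dispatches to; a typical call looks like
# (trainer construction omitted):
#
#     best_run = trainer.hyperparameter_search(
#         direction="minimize", backend="optuna", n_trials=10
#     )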
| 49 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Any = StableUnCLIPPipeline
a__ : Dict = TEXT_TO_IMAGE_PARAMS
a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ : Optional[int] = False
def a ( self : List[str] ):
__UpperCAmelCase = 32
__UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ):
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Any ):
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def a ( self : int ):
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ):
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
        __UpperCAmelCase = pipe('''anime turtle''' , generator=_lowercase , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
def a ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
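# --- Illustrative sketch (not part of the original test file) ---
# The slow tests above exercise StableUnCLIPPipeline's text-to-image path with
# attention slicing and sequential CPU offload enabled. A minimal standalone
# version of that call pattern (assuming a CUDA machine and the
# `fusing/stable-unclip-2-1-l` checkpoint the tests use) is:

def demo_stable_unclip():  # hypothetical helper, not part of the test suite
    import torch
    from diffusers import StableUnCLIPPipeline

    pipe = StableUnCLIPPipeline.from_pretrained(
        "fusing/stable-unclip-2-1-l", torch_dtype=torch.float16
    )
    pipe.enable_attention_slicing()  # lower peak VRAM at some speed cost
    pipe.enable_sequential_cpu_offload()  # keep only the active module on GPU
    generator = torch.Generator(device="cpu").manual_seed(0)
    return pipe("anime turtle", generator=generator, output_type="np").images[0]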
| 49 | 1 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def lowercase__ ( ):
__UpperCAmelCase = 10
__UpperCAmelCase = datasets.Features(
{
'''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
'''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
'''answers''': datasets.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
'''id''': datasets.Value('''int64''' ),
} )
__UpperCAmelCase = datasets.Dataset.from_dict(
{
'''tokens''': [['''foo'''] * 5] * n,
'''labels''': [[1] * 5] * n,
'''answers''': [{'''answer_start''': [97], '''text''': ['''1976''']}] * 10,
'''id''': list(range(snake_case_ ) ),
} , features=snake_case_ , )
return dataset
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Tuple ):
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
dataset.map(cache_file_name=snake_case_ )
return filename
# FILE_CONTENT + files
_lowercase : Tuple = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Union[str, Any] ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
__UpperCAmelCase = FILE_CONTENT
with open(snake_case_ , '''w''' ) as f:
f.write(snake_case_ )
return filename
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Tuple ):
import bza
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
__UpperCAmelCase = bytes(snake_case_ , '''utf-8''' )
with bza.open(snake_case_ , '''wb''' ) as f:
f.write(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Union[str, Any] ):
import gzip
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
__UpperCAmelCase = bytes(snake_case_ , '''utf-8''' )
with gzip.open(snake_case_ , '''wb''' ) as f:
f.write(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Any ):
if datasets.config.LZ4_AVAILABLE:
import lza.frame
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
__UpperCAmelCase = bytes(snake_case_ , '''utf-8''' )
with lza.frame.open(snake_case_ , '''wb''' ) as f:
f.write(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :Optional[int] ):
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
with pyazr.SevenZipFile(snake_case_ , '''w''' ) as archive:
archive.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :List[Any] , snake_case_ :List[str] ):
import tarfile
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
with tarfile.TarFile(snake_case_ , '''w''' ) as f:
f.add(snake_case_ , arcname=os.path.basename(snake_case_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Dict ):
import lzma
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
__UpperCAmelCase = bytes(snake_case_ , '''utf-8''' )
with lzma.open(snake_case_ , '''wb''' ) as f:
f.write(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict ):
import zipfile
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Union[str, Any] ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
__UpperCAmelCase = bytes(snake_case_ , '''utf-8''' )
with zstd.open(snake_case_ , '''wb''' ) as f:
f.write(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Any ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
__UpperCAmelCase = textwrap.dedent(
'''\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''' )
with open(snake_case_ , '''w''' ) as f:
f.write(snake_case_ )
return filename
_lowercase : Optional[int] = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
_lowercase : Optional[int] = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
_lowercase : str = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
_lowercase : str = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
_lowercase : Dict = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='''session''' )
def lowercase__ ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Optional[Any] ):
__UpperCAmelCase = datasets.Dataset.from_dict(snake_case_ )
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
dataset.map(cache_file_name=snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :int ):
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' )
with contextlib.closing(sqlitea.connect(snake_case_ ) ) as con:
__UpperCAmelCase = con.cursor()
cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' )
for item in DATA:
cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Optional[Any] ):
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
with open(snake_case_ , '''w''' , newline='''''' ) as f:
__UpperCAmelCase = csv.DictWriter(snake_case_ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :List[Any] ):
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
with open(snake_case_ , '''w''' , newline='''''' ) as f:
__UpperCAmelCase = csv.DictWriter(snake_case_ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] ):
import bza
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2'''
with open(snake_case_ , '''rb''' ) as f:
__UpperCAmelCase = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(snake_case_ , '''wb''' ) as f:
f.write(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :str ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Dict , snake_case_ :List[Any] , snake_case_ :Optional[int] ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
f.write(snake_case_ , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Optional[Any] , snake_case_ :Optional[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case_ ) ) )
f.write(snake_case_ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Any ):
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
__UpperCAmelCase = pa.schema(
{
'''col_1''': pa.string(),
'''col_2''': pa.intaa(),
'''col_3''': pa.floataa(),
} )
with open(snake_case_ , '''wb''' ) as f:
__UpperCAmelCase = pq.ParquetWriter(snake_case_ , schema=snake_case_ )
__UpperCAmelCase = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(snake_case_ ) )] for k in DATA[0]} , schema=snake_case_ )
writer.write_table(snake_case_ )
writer.close()
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Dict ):
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
__UpperCAmelCase = {'''data''': DATA}
with open(snake_case_ , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Dict ):
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
__UpperCAmelCase = {'''data''': DATA_DICT_OF_LISTS}
with open(snake_case_ , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Tuple ):
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
with open(snake_case_ , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(snake_case_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :List[Any] ):
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
with open(snake_case_ , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(snake_case_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :List[str] ):
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
with open(snake_case_ , '''w''' ) as f:
for item in DATA_312:
f.write(json.dumps(snake_case_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Dict ):
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
with open(snake_case_ , '''w''' ) as f:
for item in DATA_STR:
f.write(json.dumps(snake_case_ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
import gzip
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
with open(snake_case_ , '''rb''' ) as orig_file:
with gzip.open(snake_case_ , '''wb''' ) as zipped_file:
zipped_file.writelines(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
import gzip
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
with open(snake_case_ , '''rb''' ) as orig_file:
with gzip.open(snake_case_ , '''wb''' ) as zipped_file:
zipped_file.writelines(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :str , snake_case_ :List[Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Optional[Any] , snake_case_ :Any , snake_case_ :Optional[int] ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.join('''nested''' , os.path.basename(snake_case_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :Optional[int] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case_ ) ) )
f.write(snake_case_ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Tuple , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
with tarfile.TarFile(snake_case_ , '''w''' ) as f:
f.add(snake_case_ , arcname=os.path.basename(snake_case_ ) )
f.add(snake_case_ , arcname=os.path.basename(snake_case_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :int , snake_case_ :Optional[int] , snake_case_ :str , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(snake_case_ , '''w''' ) as f:
f.add(snake_case_ , arcname=os.path.join('''nested''' , os.path.basename(snake_case_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Any ):
__UpperCAmelCase = ['''0''', '''1''', '''2''', '''3''']
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
with open(snake_case_ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Any ):
__UpperCAmelCase = ['''0''', '''1''', '''2''', '''3''']
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
with open(snake_case_ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Tuple ):
__UpperCAmelCase = ['''0''', '''1''', '''2''', '''3''']
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
with open(snake_case_ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Dict , snake_case_ :Optional[Any] , snake_case_ :Any ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Optional[Any] , snake_case_ :Any ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case_ ) ) )
f.write(snake_case_ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case_ ) ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Dict , snake_case_ :Tuple ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.basename('''unsupported.ext''' ) )
f.write(snake_case_ , arcname=os.path.basename('''unsupported_2.ext''' ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Dict ):
__UpperCAmelCase = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
__UpperCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case_ )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( ):
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def lowercase__ ( ):
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :str , snake_case_ :int ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
with zipfile.ZipFile(snake_case_ , '''w''' ) as f:
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ) )
f.write(snake_case_ , arcname=os.path.basename(snake_case_ ).replace('''.jpg''' , '''2.jpg''' ) )
return path
@pytest.fixture(scope='''session''' )
def lowercase__ ( snake_case_ :Optional[int] ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data_dir''' )
(data_dir / "subdir").mkdir()
with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden file
with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
return data_dir
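# --- Illustrative sketch (not part of the original conftest) ---
# pytest resolves session-scoped fixtures like the ones above by parameter name.
# Assuming the CSV fixture is exposed as `csv_path` (its name in the upstream
# `datasets` test suite; the names here were rewritten to `lowercase__`), a
# consuming test could read the generated file back:

def test_csv_fixture_roundtrip(csv_path):  # hypothetical test using the fixture
    import csv

    with open(csv_path, newline="") as f:
        rows = list(csv.DictReader(f))
    assert [row["col_1"] for row in rows] == ["0", "1", "2", "3"]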
| 49 |
"""simple docstring"""
from typing import Any
def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :dict , snake_case_ :dict , snake_case_ :dict , ):
_validation(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
# Creates data structures and fill initial step
__UpperCAmelCase = {}
__UpperCAmelCase = {}
for state in states_space:
__UpperCAmelCase = observations_space[0]
__UpperCAmelCase = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__UpperCAmelCase = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case_ ) ):
__UpperCAmelCase = observations_space[o]
__UpperCAmelCase = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
# Update probabilities and pointers dicts
__UpperCAmelCase = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__UpperCAmelCase = arg_max
# The final observation
__UpperCAmelCase = observations_space[len(snake_case_ ) - 1]
# argmax for given final observation
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = probabilities[(k_state, final_observation)]
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
__UpperCAmelCase = arg_max
# Process pointers backwards
__UpperCAmelCase = last_state
__UpperCAmelCase = []
for o in range(len(snake_case_ ) - 1 , -1 , -1 ):
result.append(snake_case_ )
__UpperCAmelCase = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_not_empty(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
_validate_lists(snake_case_ , snake_case_ )
_validate_dicts(
snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any ):
_validate_list(snake_case_ , '''observations_space''' )
_validate_list(snake_case_ , '''states_space''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list'''
raise ValueError(snake_case_ )
else:
for x in _object:
if not isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list of strings'''
raise ValueError(snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_dict(snake_case_ , '''initial_probabilities''' , snake_case_ )
_validate_nested_dict(snake_case_ , '''transition_probabilities''' )
_validate_nested_dict(snake_case_ , '''emission_probabilities''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
_validate_dict(_object , snake_case_ , snake_case_ )
for x in _object.values():
_validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :str , snake_case_ :type , snake_case_ :bool = False ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a dict'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object ):
__UpperCAmelCase = F'''{var_name} all keys must be strings'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object.values() ):
__UpperCAmelCase = '''nested dictionary ''' if nested else ''''''
__UpperCAmelCase = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(snake_case_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
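# --- Illustrative sketch (not part of the original module) ---
# The decoder above (conventionally called `viterbi` upstream; every function in
# this sample was rewritten to `lowercase__`) returns the most likely hidden-state
# sequence. On the classic healthy/fever HMM it would behave like this:
#
#     observations = ["normal", "cold", "dizzy"]
#     states = ["Healthy", "Fever"]
#     start_p = {"Healthy": 0.6, "Fever": 0.4}
#     trans_p = {
#         "Healthy": {"Healthy": 0.7, "Fever": 0.3},
#         "Fever": {"Healthy": 0.4, "Fever": 0.6},
#     }
#     emit_p = {
#         "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#         "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#     }
#     viterbi(observations, states, start_p, trans_p, emit_p)
#     # -> ['Healthy', 'Healthy', 'Fever']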
| 49 | 1 |
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class _UpperCAmelCase :
def __init__( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple=14 , _lowercase : List[Any]=7 , _lowercase : Optional[Any]=True , _lowercase : str=True , _lowercase : Union[str, Any]=True , _lowercase : Optional[Any]=True , _lowercase : List[str]=True , _lowercase : Union[str, Any]=99 , _lowercase : Any=32 , _lowercase : Union[str, Any]=5 , _lowercase : List[Any]=4 , _lowercase : Optional[Any]=37 , _lowercase : Dict="gelu" , _lowercase : Tuple=0.1 , _lowercase : List[str]=0.1 , _lowercase : Optional[Any]=5_12 , _lowercase : str=16 , _lowercase : List[Any]=2 , _lowercase : Union[str, Any]=0.02 , _lowercase : int=3 , _lowercase : List[Any]=4 , _lowercase : Union[str, Any]=None , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_input_mask
__UpperCAmelCase = use_labels
__UpperCAmelCase = use_mc_token_ids
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = num_labels
__UpperCAmelCase = num_choices
__UpperCAmelCase = scope
__UpperCAmelCase = self.vocab_size - 1
def a ( self : Optional[Any] ):
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
if self.use_token_type_ids:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase = None
if self.use_mc_token_ids:
__UpperCAmelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = self.get_config()
__UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def a ( self : Tuple ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def a ( self : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : str , _lowercase : Tuple , _lowercase : int , *_lowercase : Tuple ):
__UpperCAmelCase = CTRLModel(config=_lowercase )
model.to(_lowercase )
model.eval()
model(_lowercase , token_type_ids=_lowercase , head_mask=_lowercase )
model(_lowercase , token_type_ids=_lowercase )
__UpperCAmelCase = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def a ( self : Any , _lowercase : Any , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Any , *_lowercase : str ):
__UpperCAmelCase = CTRLLMHeadModel(_lowercase )
model.to(_lowercase )
model.eval()
__UpperCAmelCase = model(_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            # unpack in the order produced by prepare_config_and_inputs above
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
        return config, inputs_dict
def a ( self : Tuple , _lowercase : List[Any] , _lowercase : List[str] , _lowercase : Tuple , _lowercase : List[str] , *_lowercase : Optional[int] ):
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = CTRLForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = model(_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[str] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
a__ : int = (CTRLLMHeadModel,) if is_torch_available() else ()
a__ : str = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ : List[str] = True
a__ : Dict = False
a__ : int = False
def a ( self : Optional[int] , _lowercase : Union[str, Any] , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def a ( self : List[Any] ):
__UpperCAmelCase = CTRLModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=_lowercase , n_embd=37 )
def a ( self : List[Any] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def a ( self : Tuple ):
self.config_tester.run_common_tests()
def a ( self : Dict ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_lowercase )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_lowercase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a ( self : Tuple ):
pass
@slow
def a ( self : List[Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = CTRLModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def a ( self : Union[str, Any] ):
pass
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Optional[Any] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def a ( self : Union[str, Any] ):
__UpperCAmelCase = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(_lowercase )
__UpperCAmelCase = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=_lowercase ) # Legal the president is
__UpperCAmelCase = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__UpperCAmelCase = model.generate(_lowercase , do_sample=_lowercase )
self.assertListEqual(output_ids[0].tolist() , _lowercase )
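# --- Illustrative sketch (not part of the original test file) ---
# The integration test above greedily continues "Legal the president is", where
# "Legal" is one of CTRL's control codes; it hard-codes token ids to avoid a
# tokenizer dependency. A minimal sketch with a tokenizer (assuming the public
# `ctrl` checkpoint) would be:

def demo_ctrl_generation(prompt="Legal the president is"):  # hypothetical helper
    import torch
    from transformers import CTRLLMHeadModel, CTRLTokenizer

    tokenizer = CTRLTokenizer.from_pretrained("ctrl")
    model = CTRLLMHeadModel.from_pretrained("ctrl")
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    with torch.no_grad():
        output_ids = model.generate(input_ids, do_sample=False, max_new_tokens=16)
    return tokenizer.decode(output_ids[0])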
| 49 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_lowercase : int = logging.get_logger(__name__)
_lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowercase : str = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_lowercase : int = {
'yjernite/retribert-base-uncased': 5_12,
}
_lowercase : Any = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : str = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : str = PRETRAINED_INIT_CONFIGURATION
a__ : Optional[Any] = RetriBertTokenizer
a__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars
):
__UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = strip_accents
__UpperCAmelCase = tokenize_chinese_chars
__UpperCAmelCase = normalizer_class(**_lowercase )
__UpperCAmelCase = do_lower_case
def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ):
__UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
__UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
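# --- Illustrative sketch (not part of the original module) ---
# The two methods above implement BERT-style special-token handling: a single
# sequence becomes [CLS] A [SEP] with all-zero token type ids, and a pair becomes
# [CLS] A [SEP] B [SEP] with ones over the second segment. In plain Python
# (101/102 are the usual BERT vocab ids for [CLS]/[SEP]):

def bert_style_pair(ids_a, ids_b, cls_id=101, sep_id=102):  # hypothetical helper
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids


assert bert_style_pair([7, 8], [9]) == ([101, 7, 8, 102, 9, 102], [0, 0, 0, 0, 1, 1])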
| 49 | 1 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : str ):
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
        AcceleratorState._reset_state()
| 49 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_lowercase : Dict = 'bart'
_lowercase : Dict = True
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
__UpperCAmelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
__UpperCAmelCase = qar_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
if MODEL_TYPE == "bart":
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
__UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
__UpperCAmelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
__UpperCAmelCase = sas_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = faiss.StandardGpuResources()
__UpperCAmelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
__UpperCAmelCase = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
__UpperCAmelCase = faiss.index_cpu_to_gpu(snake_case_ , 1 , snake_case_ )
wikiaab_gpu_index_flat.add(snake_case_ ) # TODO fix for larger GPU
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
__UpperCAmelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
__UpperCAmelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
__UpperCAmelCase = elia['''train_eli5''']
__UpperCAmelCase = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(snake_case_ )
return (elia_train, eli5_train_q_index)
_lowercase ,_lowercase ,_lowercase : Dict = load_indexes()
_lowercase ,_lowercase ,_lowercase ,_lowercase : Dict = load_models()
_lowercase ,_lowercase : Tuple = load_train_data()
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Any=10 ):
__UpperCAmelCase = embed_questions_for_retrieval([question] , snake_case_ , snake_case_ )
__UpperCAmelCase , __UpperCAmelCase = eli5_train_q_index.search(snake_case_ , snake_case_ )
__UpperCAmelCase = [elia_train[int(snake_case_ )] for i in I[0]]
return nn_examples
def lowercase__ ( snake_case_ :Any , snake_case_ :Dict="wiki40b" , snake_case_ :str="dense" , snake_case_ :Union[str, Any]=10 ):
if source == "none":
__UpperCAmelCase , __UpperCAmelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__UpperCAmelCase , __UpperCAmelCase = query_qa_dense_index(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
__UpperCAmelCase , __UpperCAmelCase = query_es_index(
snake_case_ , snake_case_ , index_name='''english_wiki40b_snippets_100w''' , n_results=snake_case_ , )
__UpperCAmelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__UpperCAmelCase = '''question: {} context: {}'''.format(snake_case_ , snake_case_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None),
} )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] , snake_case_ :str , snake_case_ :List[Any]=64 , snake_case_ :Optional[int]=256 , snake_case_ :List[Any]=False , snake_case_ :Optional[Any]=2 , snake_case_ :Optional[Any]=0.95 , snake_case_ :List[Any]=0.8 ):
with torch.no_grad():
__UpperCAmelCase = qa_sas_generate(
snake_case_ , snake_case_ , snake_case_ , num_answers=1 , num_beams=snake_case_ , min_len=snake_case_ , max_len=snake_case_ , do_sample=snake_case_ , temp=snake_case_ , top_p=snake_case_ , top_k=snake_case_ , max_input_length=1_024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
_lowercase : Dict = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
_lowercase : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowercase : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
_lowercase : str = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
_lowercase : Optional[int] = st.sidebar.checkbox('Demo options')
if demo_options:
_lowercase : Tuple = st.sidebar.selectbox(
'',
action_list,
index=3,
)
_lowercase : List[str] = action_list.index(action_st)
_lowercase : str = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
_lowercase : int = show_type == 'Show full text of passages'
else:
_lowercase : str = 3
_lowercase : List[Any] = True
_lowercase : Optional[int] = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    _lowercase : Any = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.\n The answer is then generated by a sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
_lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
_lowercase : List[str] = 'wiki40b'
_lowercase : Optional[int] = 'dense'
_lowercase : List[Any] = 'beam'
_lowercase : str = 2
_lowercase : Optional[int] = 64
_lowercase : Union[str, Any] = 2_56
_lowercase : List[str] = None
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = st.sidebar.checkbox('Generation options')
if generate_options:
_lowercase : Tuple = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
_lowercase : Optional[int] = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
_lowercase : Optional[Any] = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
_lowercase : str = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowercase : List[Any] = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowercase : Dict = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowercase : Union[str, Any] = None
# start main text
_lowercase : Optional[int] = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
    'What exactly are vitamins?',
    'How does nuclear energy provide electricity?',
    'What\'s the difference between viruses and bacteria?',
    'Why are flutes classified as woodwinds when most of them are made out of metal?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
_lowercase : Optional[int] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_lowercase : Optional[Any] = st.text_input('Enter your question here:', '')
else:
_lowercase : int = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowercase ,_lowercase : Any = make_support(question, source=wiki_source, method='dense', n_results=10)
_lowercase ,_lowercase : Union[str, Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
_lowercase : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowercase : Any = support_list[:10]
_lowercase : Tuple = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
_lowercase ,_lowercase : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_lowercase ,_lowercase : Union[str, Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
_lowercase : int = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
_lowercase : Any = res[1].strip()
if sec_titles == "":
_lowercase : Dict = '[{}]({})'.format(res[0], wiki_url)
else:
_lowercase : List[Any] = sec_titles.split(' & ')
_lowercase : int = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
_lowercase : List[Any] = find_nearest_training(question)
_lowercase : Tuple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
_lowercase : int = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
_lowercase : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
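# --- Illustrative sketch (not part of the original app) ---
# The dense retrieval path above embeds the question with RetriBERT and runs
# max-inner-product search against precomputed 128-d passage embeddings in a
# faiss IndexFlatIP. The core pattern, self-contained with random vectors
# standing in for real embeddings:

def demo_dense_search():  # hypothetical helper
    import faiss
    import numpy as np

    passages = np.random.rand(1000, 128).astype("float32")  # fake passage reps
    index = faiss.IndexFlatIP(128)  # exact inner-product (MIPS) index
    index.add(passages)
    query = np.random.rand(1, 128).astype("float32")  # fake question embedding
    scores, ids = index.search(query, 10)  # top-10 passage ids for the query
    return ids[0]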
| 49 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_bertweet': ['BertweetTokenizer']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
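# --- Illustrative sketch (not part of the original module) ---
# _LazyModule defers the real `tokenization_bertweet` import until an attribute
# such as BertweetTokenizer is first accessed. The same effect can be hand-rolled
# in a package __init__.py with a module-level __getattr__ (PEP 562); the helper
# below is hypothetical, not the transformers implementation:
#
#     import importlib
#
#     _LAZY_ATTRS = {"BertweetTokenizer": ".tokenization_bertweet"}
#
#     def __getattr__(name):
#         if name in _LAZY_ATTRS:  # import the submodule only on first access
#             submodule = importlib.import_module(_LAZY_ATTRS[name], __package__)
#             return getattr(submodule, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")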
| 49 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[str] = CycleDiffusionPipeline
a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
a__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a ( self : Optional[int] ):
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCAmelCase = CLIPTextModel(_lowercase )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ):
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
__UpperCAmelCase = image / 2 + 0.5
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Optional[int] ):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowercase , '''half''' ):
__UpperCAmelCase = module.half()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a ( self : Tuple ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def a ( self : List[str] ):
return super().test_inference_batch_single_identical()
@skip_mps
def a ( self : int ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self : int ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(
            _lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.float16 , revision='''fp16''' )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def a ( self : Optional[Any] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 49 | 1 |
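For orientation, a hedged end-to-end sketch of the pipeline these tests exercise; the model id, source image, and hyperparameters are lifted from the slow test above, but treat this as illustrative rather than a verified recipe:

import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

model_id = "CompVis/stable-diffusion-v1-4"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/cycle-diffusion/black_colored_car.png"
).resize((512, 512))

# Edit the image by moving from the source prompt to the target prompt.
image = pipe(
    prompt="A blue colored car",
    source_prompt="A black colored car",
    image=init_image,
    num_inference_steps=100,
    eta=0.1,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
    generator=torch.manual_seed(0),
).images[0]
image.save("blue_car.png")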
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[str] = CycleDiffusionPipeline
a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
a__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a ( self : Optional[int] ):
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCAmelCase = CLIPTextModel(_lowercase )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ):
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
__UpperCAmelCase = image / 2 + 0.5
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Optional[int] ):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowercase , '''half''' ):
__UpperCAmelCase = module.half()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a ( self : Tuple ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def a ( self : List[str] ):
return super().test_inference_batch_single_identical()
@skip_mps
def a ( self : int ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self : int ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(
_lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def a ( self : Optional[Any] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 49 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'}
_lowercase : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
_lowercase : List[str] = {
'google/rembert': 2_56,
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Union[str, Any] = VOCAB_FILES_NAMES
a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Tuple=True , _lowercase : str=True , _lowercase : str="[CLS]" , _lowercase : Dict="[SEP]" , _lowercase : Union[str, Any]="[UNK]" , _lowercase : Any="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : Tuple="[CLS]" , _lowercase : Optional[Any]="[MASK]" , **_lowercase : str , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = remove_space
__UpperCAmelCase = keep_accents
__UpperCAmelCase = vocab_file
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def a ( self : int ):
return len(self.sp_model )
def a ( self : Tuple ):
__UpperCAmelCase = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
__UpperCAmelCase = self.__dict__.copy()
__UpperCAmelCase = None
return state
def __setstate__( self : Tuple , _lowercase : str ):
__UpperCAmelCase = d
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any]=False ):
__UpperCAmelCase = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def a ( self : int , _lowercase : List[str] ):
return self.sp_model.PieceToId(_lowercase )
def a ( self : List[str] , _lowercase : str ):
return self.sp_model.IdToPiece(_lowercase )
def a ( self : Any , _lowercase : Dict ):
__UpperCAmelCase = self.sp_model.decode_pieces(_lowercase )
return out_string
def a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a ( self : Optional[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowercase ) )
return
__UpperCAmelCase = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 49 | 1 |
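To make the special-token plumbing above concrete, here is a pure-Python mirror of what the pair-building, token-type, and special-token-mask methods produce for a sentence pair (toy ids, no SentencePiece model needed):

cls_id, sep_id = 101, 102          # hypothetical [CLS]/[SEP] ids
ids_a, ids_b = [7, 8, 9], [4, 5]

pair = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
special_tokens_mask = [1] + [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]

assert len(pair) == len(token_type_ids) == len(special_tokens_mask)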
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowercase : str = logging.get_logger(__name__)
_lowercase : Optional[int] = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase ):
a__ : Optional[int] = "resnet"
a__ : str = ["basic", "bottleneck"]
def __init__( self : str , _lowercase : Dict=3 , _lowercase : Optional[int]=64 , _lowercase : Dict=[2_56, 5_12, 10_24, 20_48] , _lowercase : Optional[int]=[3, 4, 6, 3] , _lowercase : List[Any]="bottleneck" , _lowercase : List[str]="relu" , _lowercase : int=False , _lowercase : Dict=None , _lowercase : str=None , **_lowercase : Any , ):
super().__init__(**_lowercase )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
__UpperCAmelCase = num_channels
__UpperCAmelCase = embedding_size
__UpperCAmelCase = hidden_sizes
__UpperCAmelCase = depths
__UpperCAmelCase = layer_type
__UpperCAmelCase = hidden_act
__UpperCAmelCase = downsample_in_first_stage
__UpperCAmelCase = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(_lowercase ) + 1 )]
__UpperCAmelCase , __UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Tuple = version.parse("1.11" )
@property
def a ( self : Optional[Any] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def a ( self : int ):
return 1E-3
| 49 |
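A brief instantiation sketch for the configuration class defined above, assuming it is exported as transformers.ResNetConfig as upstream:

from transformers import ResNetConfig

# A smaller, basic-block variant; out_features picks the stages a backbone exposes.
config = ResNetConfig(
    depths=[2, 2, 2, 2],
    hidden_sizes=[64, 128, 256, 512],
    layer_type="basic",
    out_features=["stage2", "stage4"],
)
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']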
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_lowercase : int = logging.get_logger(__name__)
class _UpperCAmelCase ( _lowerCAmelCase ):
def __init__( self : Union[str, Any] , *_lowercase : int , **_lowercase : Tuple ):
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 49 |
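The file above is the standard deprecation shim; the pattern in isolation looks like this (the names here are placeholders, not transformers classes):

import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    # Deprecated alias: warn at construction, then behave exactly like the new class.
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

extractor = OldFeatureExtractor(size=384)  # emits a FutureWarning, works as before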
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : List[Any] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowercase__ ( snake_case_ :Union[str, Any] ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowercase__ ( snake_case_ :int , snake_case_ :Dict ):
if args.student_type == "roberta":
__UpperCAmelCase = False
elif args.student_type == "gpt2":
__UpperCAmelCase = False
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Union[str, Any] ):
if args.student_type == "roberta":
__UpperCAmelCase = False
def lowercase__ ( ):
__UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
        '''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids), grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' )
__UpperCAmelCase = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ )
__UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__UpperCAmelCase = special_tok_ids
__UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
__UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__UpperCAmelCase = 0.0 # do not predict special tokens
__UpperCAmelCase = torch.from_numpy(snake_case_ )
else:
__UpperCAmelCase = None
__UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
__UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
__UpperCAmelCase = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__UpperCAmelCase = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 49 | 1 |
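To illustrate the loss-weight constraints that sanity_checks enforces, a tiny runnable mirror of the key assertions (the Namespace fields below are just the subset those checks read):

from argparse import Namespace

args = Namespace(mlm=True, alpha_mlm=2.0, alpha_clm=0.0,
                 alpha_ce=5.0, alpha_mse=0.0, alpha_cos=1.0)

# MLM and CLM objectives are mutually exclusive, and at least one loss must be active.
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0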
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : torch.FloatTensor
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase ):
@register_to_config
def __init__( self : Optional[int] , _lowercase : int = 3 , _lowercase : int = 3 , _lowercase : Tuple[str] = ("DownEncoderBlock2D",) , _lowercase : Tuple[str] = ("UpDecoderBlock2D",) , _lowercase : Tuple[int] = (64,) , _lowercase : int = 1 , _lowercase : str = "silu" , _lowercase : int = 3 , _lowercase : int = 32 , _lowercase : int = 2_56 , _lowercase : int = 32 , _lowercase : Optional[int] = None , _lowercase : float = 0.18_215 , _lowercase : str = "group" , ):
super().__init__()
# pass init params to Encoder
__UpperCAmelCase = Encoder(
in_channels=_lowercase , out_channels=_lowercase , down_block_types=_lowercase , block_out_channels=_lowercase , layers_per_block=_lowercase , act_fn=_lowercase , norm_num_groups=_lowercase , double_z=_lowercase , )
__UpperCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels
        __UpperCAmelCase = nn.Conv2d(_lowercase , _lowercase , 1 )
__UpperCAmelCase = VectorQuantizer(_lowercase , _lowercase , beta=0.25 , remap=_lowercase , sane_index_shape=_lowercase )
        __UpperCAmelCase = nn.Conv2d(_lowercase , _lowercase , 1 )
# pass init params to Decoder
__UpperCAmelCase = Decoder(
in_channels=_lowercase , out_channels=_lowercase , up_block_types=_lowercase , block_out_channels=_lowercase , layers_per_block=_lowercase , act_fn=_lowercase , norm_num_groups=_lowercase , norm_type=_lowercase , )
@apply_forward_hook
def a ( self : List[Any] , _lowercase : torch.FloatTensor , _lowercase : bool = True ):
__UpperCAmelCase = self.encoder(_lowercase )
__UpperCAmelCase = self.quant_conv(_lowercase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_lowercase )
@apply_forward_hook
def a ( self : Dict , _lowercase : torch.FloatTensor , _lowercase : bool = False , _lowercase : bool = True ):
# also go through quantization layer
if not force_not_quantize:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self.quantize(_lowercase )
else:
__UpperCAmelCase = h
__UpperCAmelCase = self.post_quant_conv(_lowercase )
__UpperCAmelCase = self.decoder(_lowercase , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowercase )
def a ( self : Optional[Any] , _lowercase : torch.FloatTensor , _lowercase : bool = True ):
__UpperCAmelCase = sample
__UpperCAmelCase = self.encode(_lowercase ).latents
__UpperCAmelCase = self.decode(_lowercase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowercase )
| 49 |
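A quick smoke test of the autoencoder defined above, assuming it is exported as diffusers.VQModel with its usual encode/decode API (default hyperparameters; shapes are illustrative):

import torch
from diffusers import VQModel

model = VQModel()  # defaults: 3-channel images, a single down/up block
model.eval()

x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = model.encode(x).latents      # continuous latents before quantization
    recon = model.decode(latents).sample   # quantize, then decode back to image space
print(latents.shape, recon.shape)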
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 1 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowercase__ ( snake_case_ :list[list[float]] ):
__UpperCAmelCase = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(snake_case_ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
__UpperCAmelCase = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creates a copy of the matrix with swapped positions of the elements
__UpperCAmelCase = [[0.0, 0.0], [0.0, 0.0]]
__UpperCAmelCase , __UpperCAmelCase = matrix[1][1], matrix[0][0]
__UpperCAmelCase , __UpperCAmelCase = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(snake_case_ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(snake_case_ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
__UpperCAmelCase = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
__UpperCAmelCase = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
__UpperCAmelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
__UpperCAmelCase = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
__UpperCAmelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
__UpperCAmelCase = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
__UpperCAmelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
__UpperCAmelCase = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
__UpperCAmelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
__UpperCAmelCase = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
__UpperCAmelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
__UpperCAmelCase = array(snake_case_ )
for i in range(3 ):
for j in range(3 ):
__UpperCAmelCase = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
__UpperCAmelCase = array(snake_case_ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(snake_case_ )
# Calculate the inverse of the matrix
return [[float(d(snake_case_ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
| 49 |
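A quick sanity check for the 2x2 branch above; note that the dump's obfuscated assignments never actually populate swapped_matrix, so this illustrates the intended upstream behavior of the routine rather than the text as printed:

matrix = [[4.0, 7.0], [2.0, 6.0]]
determinant = matrix[0][0] * matrix[1][1] - matrix[1][0] * matrix[0][1]  # 10.0
swapped_matrix = [
    [matrix[1][1], -matrix[0][1]],
    [-matrix[1][0], matrix[0][0]],
]
inverse = [[n / determinant for n in row] for row in swapped_matrix]
print(inverse)  # [[0.6, -0.7], [-0.2, 0.4]]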
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowercase : Union[str, Any] = logging.getLogger(__name__)
_lowercase : Optional[Any] = 'Hello world! cécé herlolip'
_lowercase : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowercase__ ( snake_case_ :Any , snake_case_ :int ):
__UpperCAmelCase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , )
    __UpperCAmelCase = torch.load(snake_case_ , lambda storage , loc : storage )
__UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ )
original.eval()
__UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
__UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__UpperCAmelCase = encoder_input_ids
__UpperCAmelCase = decoder_input_ids
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = original.generator(snake_case_ )
__UpperCAmelCase = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = new_model.generator(snake_case_ )
__UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
        help='Path to the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowercase : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 49 | 1 |
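Distilled from the script above, the core correctness check a weight-conversion pipeline should run before saving: compare old and new model outputs elementwise under a tolerance (a generic sketch, not tied to BertAbs):

import torch

def outputs_match(old_out: torch.Tensor, new_out: torch.Tensor, atol: float = 1e-3) -> bool:
    # Report the worst-case discrepancy, then apply the same tolerance the script uses.
    max_diff = torch.max(torch.abs(old_out - new_out)).item()
    print(f"Maximum absolute difference between outputs: {max_diff:.2e}")
    return torch.allclose(old_out, new_out, atol=atol)

assert outputs_match(torch.ones(2, 3), torch.ones(2, 3) + 1e-5)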
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowercase : List[Any] = logging.getLogger(__name__)
def lowercase__ ( snake_case_ :List[Any]=2 , snake_case_ :Optional[Any]=3 , snake_case_ :List[str]=16 , snake_case_ :int = 10 , snake_case_ :int = 2 ):
def get_dataset(snake_case_ :List[Any] ):
__UpperCAmelCase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(snake_case_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
__UpperCAmelCase = get_dataset(snake_case_ )
__UpperCAmelCase = get_dataset(snake_case_ )
__UpperCAmelCase = DataLoader(snake_case_ , shuffle=snake_case_ , batch_size=snake_case_ , num_workers=4 )
__UpperCAmelCase = DataLoader(snake_case_ , shuffle=snake_case_ , batch_size=snake_case_ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowercase__ ( snake_case_ :str , snake_case_ :Optional[int] , snake_case_ :List[str] , snake_case_ :str , snake_case_ :Optional[int] , snake_case_ :Optional[int]=None ):
__UpperCAmelCase = []
for epoch in range(snake_case_ ):
# Train quickly
model.train()
for batch in dataloader:
__UpperCAmelCase , __UpperCAmelCase = batch
__UpperCAmelCase = model(snake_case_ )
__UpperCAmelCase = torch.nn.functional.mse_loss(snake_case_ , snake_case_ )
accelerator.backward(snake_case_ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _UpperCAmelCase ( nn.Module ):
def __init__( self : int ):
super().__init__()
__UpperCAmelCase = nn.Parameter(torch.randn(1 ) )
__UpperCAmelCase = nn.Parameter(torch.randn(1 ) )
def a ( self : Dict , _lowercase : List[Any] ):
return x * self.a + self.b
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(total_limit=1 , project_dir=_lowercase , automatic_checkpoint_naming=_lowercase )
# Train baseline
__UpperCAmelCase = Accelerator(project_config=_lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def a ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
# Train baseline
__UpperCAmelCase = Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase )
# Save initial
__UpperCAmelCase = os.path.join(_lowercase , '''initial''' )
accelerator.save_state(_lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) = model.a.item(), model.b.item()
__UpperCAmelCase = optimizer.state_dict()
__UpperCAmelCase = train(3 , _lowercase , _lowercase , _lowercase , _lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) = model.a.item(), model.b.item()
__UpperCAmelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = Accelerator()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase )
accelerator.load_state(_lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) = model.a.item(), model.b.item()
__UpperCAmelCase = optimizer.state_dict()
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
__UpperCAmelCase = train(2 , _lowercase , _lowercase , _lowercase , _lowercase )
# Save everything
__UpperCAmelCase = os.path.join(_lowercase , '''checkpoint''' )
accelerator.save_state(_lowercase )
# Load everything back in and make sure all states work
accelerator.load_state(_lowercase )
test_rands += train(1 , _lowercase , _lowercase , _lowercase , _lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) = model.a.item(), model.b.item()
__UpperCAmelCase = optimizer.state_dict()
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
def a ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=_lowercase )
# Train baseline
__UpperCAmelCase = Accelerator(project_dir=_lowercase , project_config=_lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase )
# Save initial
accelerator.save_state()
((__UpperCAmelCase) , (__UpperCAmelCase)) = model.a.item(), model.b.item()
__UpperCAmelCase = optimizer.state_dict()
__UpperCAmelCase = train(3 , _lowercase , _lowercase , _lowercase , _lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) = model.a.item(), model.b.item()
__UpperCAmelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_lowercase )
__UpperCAmelCase = Accelerator(project_dir=_lowercase , project_config=_lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase )
accelerator.load_state(os.path.join(_lowercase , '''checkpoints''' , '''checkpoint_0''' ) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = model.a.item(), model.b.item()
__UpperCAmelCase = optimizer.state_dict()
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
__UpperCAmelCase = train(2 , _lowercase , _lowercase , _lowercase , _lowercase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_lowercase , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , _lowercase , _lowercase , _lowercase , _lowercase )
((__UpperCAmelCase) , (__UpperCAmelCase)) = model.a.item(), model.b.item()
__UpperCAmelCase = optimizer.state_dict()
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
def a ( self : List[Any] ):
__UpperCAmelCase = torch.tensor([1, 2, 3] )
__UpperCAmelCase = torch.tensor([2, 3, 4] )
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(net.parameters() )
__UpperCAmelCase = Accelerator()
with self.assertRaises(_lowercase ) as ve:
accelerator.register_for_checkpointing(_lowercase , _lowercase , _lowercase , _lowercase )
__UpperCAmelCase = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def a ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCAmelCase = torch.optim.lr_scheduler.StepLR(_lowercase , step_size=1 , gamma=0.99 )
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=_lowercase )
# Train baseline
__UpperCAmelCase = Accelerator(project_dir=_lowercase , project_config=_lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Save initial
accelerator.save_state()
__UpperCAmelCase = scheduler.state_dict()
train(3 , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
self.assertNotEqual(_lowercase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_lowercase , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(_lowercase , scheduler.state_dict() )
def a ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=_lowercase , total_limit=2 )
# Train baseline
__UpperCAmelCase = Accelerator(project_dir=_lowercase , project_config=_lowercase )
__UpperCAmelCase = accelerator.prepare(_lowercase )
            # Save 11 states; with total_limit=2 only the two most recent checkpoints survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_lowercase , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def a ( self : str ):
__UpperCAmelCase = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(_lowercase , env=os.environ.copy() )
if __name__ == "__main__":
_lowercase : Union[str, Any] = '/tmp/accelerate/state_checkpointing'
_lowercase : int = DummyModel()
_lowercase : str = torch.optim.Adam(params=model.parameters(), lr=1e-3)
_lowercase : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowercase ,_lowercase : Optional[int] = dummy_dataloaders()
_lowercase : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowercase : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase : int = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowercase ,_lowercase : Optional[int] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer state is loaded on the GPU
for group in optimizer.param_groups:
_lowercase : List[str] = group['params'][0].device
break
assert param_device.type == accelerator.device.type
_lowercase : Union[str, Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
_lowercase : Union[str, Any] = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
_lowercase : int = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
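    # Note on the map_location values exercised above: 'cpu' loads the optimizer
    # state onto the CPU, 'on_device' loads it onto accelerator.device, and any
    # other string raises TypeError('Unsupported optimizer map location passed').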
| 49 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
    def gpu_provider( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def a ( self : Any ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self : Optional[int] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
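# Standalone usage sketch of the pipeline under test (assumed environment with a
# CUDA build of onnxruntime; init_image/mask_image stand in for loaded PIL images):
#
# pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
#     "runwayml/stable-diffusion-inpainting", revision="onnx",
#     provider="CUDAExecutionProvider")
# image = pipe(prompt="A red cat sitting on a park bench", image=init_image,
#              mask_image=mask_image, num_inference_steps=10,
#              output_type="np").images[0]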
| 49 | 1 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings( monkeypatch ):
    monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() )
@pytest.fixture
def mock_hfh( monkeypatch ):
    class MetricMock:
        def __init__( self , metric_id ):
            self.id = metric_id
    class HfhMock:
        _metrics = [MetricMock(metric_id ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
        def list_metrics( self ):
            return self._metrics
    monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() )
@pytest.mark.parametrize(
    '''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def test_metric_deprecation_warning( func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ):
    if "tmp_path" in args:
        args = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match='''https://huggingface.co/docs/evaluate''' ):
        func(*args )
| 49 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset( dataset , expected_features ):
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
if issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = jsonl_path
elif issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
def _check_json_datasetdict( dataset_dict , expected_features , splits=("train",) ):
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ):
if split:
__UpperCAmelCase = {split: jsonl_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json( buffer ):
    return json.load(buffer )
def load_json_lines( buffer ):
    return [json.loads(line ) for line in buffer]
class _UpperCAmelCase :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
def a ( self : int , _lowercase : Any ):
with pytest.raises(_lowercase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
__UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
assert exported_content == original_content
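# Self-contained round trip mirroring the writer/reader pair tested above
# (a sketch; the toy dataset contents are made up):
#
# ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
# with io.BytesIO() as buf:
#     JsonDatasetWriter(ds, buf, lines=True).write()
#     buf.seek(0)
#     rows = [json.loads(line) for line in buf]  # one JSON object per line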
| 49 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : Tuple = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _UpperCAmelCase ( _lowerCAmelCase ):
    model_type = "megatron-bert"
def __init__( self : Union[str, Any] , _lowercase : str=2_90_56 , _lowercase : int=10_24 , _lowercase : Any=24 , _lowercase : Optional[int]=16 , _lowercase : Union[str, Any]=40_96 , _lowercase : Union[str, Any]="gelu" , _lowercase : Tuple=0.1 , _lowercase : Dict=0.1 , _lowercase : str=5_12 , _lowercase : Optional[int]=2 , _lowercase : List[Any]=0.02 , _lowercase : List[Any]=1E-12 , _lowercase : Any=0 , _lowercase : Dict="absolute" , _lowercase : Optional[int]=True , **_lowercase : Optional[Any] , ):
super().__init__(pad_token_id=_lowercase , **_lowercase )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
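# Usage sketch for the config class above (MegatronBertConfig upstream; the
# values and the standard PretrainedConfig save/load API are illustrative):
#
# config = MegatronBertConfig(vocab_size=29056, num_hidden_layers=24)
# config.save_pretrained("./megatron-bert")                 # writes config.json
# config = MegatronBertConfig.from_pretrained("./megatron-bert")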
| 49 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Union[str, Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
__UpperCAmelCase = TextIteratorStreamer(_lowercase )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowercase , _lowercase )
def a ( self : str ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
__UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Tuple ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase )
model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
__UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a ( self : Tuple ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowercase ):
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
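# Typical application-side consumption of TextIteratorStreamer, mirroring the
# tests above (sketch; model/tokenizer setup elided):
#
# streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=30.0)
# Thread(target=model.generate,
#        kwargs={"input_ids": input_ids, "max_new_tokens": 64, "streamer": streamer}).start()
# for chunk in streamer:          # yields decoded text as soon as it is ready
#     print(chunk, end="", flush=True)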
| 49 | 1 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
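# Illustration of the truncation above (assumed completion text): with "\ndef"
# in EOF_STRINGS, "    return x\n\ndef helper():" splits into
# ["    return x\n", "\ndef", " helper():"], and dropping the last two pieces
# keeps only the code before the final stop sequence.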
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation.")
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs)
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = F'''check({human_eval["test"][task]["entry_point"]})'''
            references.append("\n" + test_func + "\n" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers)
        print(F'''Results: {pass_at_k}''')
        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
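# Hypothetical launch command (flag names follow the HumanEvalArguments fields
# referenced above; the values are examples only):
#
# accelerate launch human_eval.py \
#     --model_ckpt codeparrot/codeparrot \
#     --do_sample True --temperature 0.2 \
#     --n_samples 200 --batch_size 10 --HF_ALLOW_CODE_EVAL 1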
| 49 |
"""simple docstring"""
def speed_of_sound_in_a_fluid( density :float , bulk_modulus :float ):
    if density <= 0:
        raise ValueError('''Impossible fluid density''' )
    if bulk_modulus <= 0:
        raise ValueError('''Impossible bulk modulus''' )
    return (bulk_modulus / density) ** 0.5
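# Worked example (assumed fluid properties): water near 20 °C has a density of
# about 998 kg/m^3 and a bulk modulus of about 2.15e9 Pa, so
# speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) ~= 1467.8 m/s.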
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
    'processing_layoutlmv2': ['LayoutLMv2Processor'],
    'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_layoutlmv2_fast'] = ['LayoutLMv2TokenizerFast']
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_layoutlmv2'] = ['LayoutLMv2FeatureExtractor']
    _import_structure['image_processing_layoutlmv2'] = ['LayoutLMv2ImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_layoutlmv2'] = [
        'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv2ForQuestionAnswering',
        'LayoutLMv2ForSequenceClassification',
        'LayoutLMv2ForTokenClassification',
        'LayoutLMv2Layer',
        'LayoutLMv2Model',
        'LayoutLMv2PreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 |
"""simple docstring"""
def check_cycle( graph :dict ):
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search( graph :dict , vertex :int , visited :set , rec_stk :set ):
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
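# Quick sanity examples (hypothetical graphs):
# check_cycle({0: [1], 1: [2], 2: []})   -> False  (a simple chain, no back edge)
# check_cycle({0: [1], 1: [2], 2: [0]})  -> True   (edge 2 -> 0 closes a cycle)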
| 49 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
    def _get_uniform_logits( self , batch_size : int , length : int ):
        scores = jnp.ones((batch_size, length) ) / length
        return scores
def a ( self : str ):
__UpperCAmelCase = None
__UpperCAmelCase = 20
__UpperCAmelCase = self._get_uniform_logits(batch_size=2 , length=_lowercase )
# tweak scores to not be uniform anymore
__UpperCAmelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__UpperCAmelCase = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__UpperCAmelCase = jax.nn.softmax(_lowercase , axis=-1 )
__UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
__UpperCAmelCase = jax.nn.softmax(temp_dist_warper_sharper(_lowercase , scores.copy() , cur_len=_lowercase ) , axis=-1 )
__UpperCAmelCase = jax.nn.softmax(temp_dist_warper_smoother(_lowercase , scores.copy() , cur_len=_lowercase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def a ( self : List[Any] ):
__UpperCAmelCase = None
__UpperCAmelCase = 10
__UpperCAmelCase = 2
# create ramp distribution
__UpperCAmelCase = np.broadcast_to(np.arange(_lowercase )[None, :] , (batch_size, vocab_size) ).copy()
__UpperCAmelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
__UpperCAmelCase = FlaxTopKLogitsWarper(3 )
__UpperCAmelCase = top_k_warp(_lowercase , _lowercase , cur_len=_lowercase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__UpperCAmelCase = 5
__UpperCAmelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__UpperCAmelCase = np.broadcast_to(np.arange(_lowercase )[None, :] , (batch_size, length) ).copy()
__UpperCAmelCase = top_k_warp_safety_check(_lowercase , _lowercase , cur_len=_lowercase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def a ( self : str ):
__UpperCAmelCase = None
__UpperCAmelCase = 10
__UpperCAmelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__UpperCAmelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__UpperCAmelCase = FlaxTopPLogitsWarper(0.8 )
__UpperCAmelCase = np.exp(top_p_warp(_lowercase , _lowercase , cur_len=_lowercase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__UpperCAmelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1E-3 ) )
# check edge cases with negative and extreme logits
__UpperCAmelCase = np.broadcast_to(np.arange(_lowercase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__UpperCAmelCase = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
__UpperCAmelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__UpperCAmelCase = top_p_warp(_lowercase , _lowercase , cur_len=_lowercase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def a ( self : List[str] ):
__UpperCAmelCase = 20
__UpperCAmelCase = 4
__UpperCAmelCase = 0
__UpperCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowercase )
# check that min length is applied at length 5
__UpperCAmelCase = ids_tensor((batch_size, 20) , vocab_size=20 )
__UpperCAmelCase = 5
__UpperCAmelCase = self._get_uniform_logits(_lowercase , _lowercase )
__UpperCAmelCase = min_dist_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
__UpperCAmelCase = self._get_uniform_logits(_lowercase , _lowercase )
__UpperCAmelCase = 15
__UpperCAmelCase = min_dist_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertFalse(jnp.isinf(_lowercase ).any() )
def a ( self : List[Any] ):
__UpperCAmelCase = 20
__UpperCAmelCase = 4
__UpperCAmelCase = 0
__UpperCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowercase )
# check that all scores are -inf except the bos_token_id score
__UpperCAmelCase = ids_tensor((batch_size, 1) , vocab_size=20 )
__UpperCAmelCase = 1
__UpperCAmelCase = self._get_uniform_logits(_lowercase , _lowercase )
__UpperCAmelCase = logits_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
__UpperCAmelCase = 3
__UpperCAmelCase = self._get_uniform_logits(_lowercase , _lowercase )
__UpperCAmelCase = logits_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertFalse(jnp.isinf(_lowercase ).any() )
def a ( self : Optional[int] ):
__UpperCAmelCase = 20
__UpperCAmelCase = 4
__UpperCAmelCase = 0
__UpperCAmelCase = 5
__UpperCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowercase , eos_token_id=_lowercase )
# check that all scores are -inf except the eos_token_id when max_length is reached
__UpperCAmelCase = ids_tensor((batch_size, 4) , vocab_size=20 )
__UpperCAmelCase = 4
__UpperCAmelCase = self._get_uniform_logits(_lowercase , _lowercase )
__UpperCAmelCase = logits_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__UpperCAmelCase = 3
__UpperCAmelCase = self._get_uniform_logits(_lowercase , _lowercase )
__UpperCAmelCase = logits_processor(_lowercase , _lowercase , cur_len=_lowercase )
self.assertFalse(jnp.isinf(_lowercase ).any() )
def a ( self : Any ):
__UpperCAmelCase = 4
__UpperCAmelCase = 10
__UpperCAmelCase = 15
__UpperCAmelCase = 2
__UpperCAmelCase = 1
__UpperCAmelCase = 15
# dummy input_ids and scores
__UpperCAmelCase = ids_tensor((batch_size, sequence_length) , _lowercase )
__UpperCAmelCase = input_ids.copy()
__UpperCAmelCase = self._get_uniform_logits(_lowercase , _lowercase )
__UpperCAmelCase = scores.copy()
# instantiate all dist processors
__UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__UpperCAmelCase = FlaxTopKLogitsWarper(3 )
__UpperCAmelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__UpperCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowercase )
__UpperCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowercase )
__UpperCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowercase , eos_token_id=_lowercase )
__UpperCAmelCase = 10
# no processor list
__UpperCAmelCase = temp_dist_warp(_lowercase , _lowercase , cur_len=_lowercase )
__UpperCAmelCase = top_k_warp(_lowercase , _lowercase , cur_len=_lowercase )
__UpperCAmelCase = top_p_warp(_lowercase , _lowercase , cur_len=_lowercase )
__UpperCAmelCase = min_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
__UpperCAmelCase = bos_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
__UpperCAmelCase = eos_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
# with processor list
__UpperCAmelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__UpperCAmelCase = processor(_lowercase , _lowercase , cur_len=_lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowercase , _lowercase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def a ( self : int ):
__UpperCAmelCase = 4
__UpperCAmelCase = 10
__UpperCAmelCase = 15
__UpperCAmelCase = 2
__UpperCAmelCase = 1
__UpperCAmelCase = 15
# dummy input_ids and scores
__UpperCAmelCase = ids_tensor((batch_size, sequence_length) , _lowercase )
__UpperCAmelCase = input_ids.copy()
__UpperCAmelCase = self._get_uniform_logits(_lowercase , _lowercase )
__UpperCAmelCase = scores.copy()
# instantiate all dist processors
__UpperCAmelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__UpperCAmelCase = FlaxTopKLogitsWarper(3 )
__UpperCAmelCase = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__UpperCAmelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowercase )
__UpperCAmelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowercase )
__UpperCAmelCase = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowercase , eos_token_id=_lowercase )
__UpperCAmelCase = 10
# no processor list
def run_no_processor_list(_lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : List[Any] ):
__UpperCAmelCase = temp_dist_warp(_lowercase , _lowercase , cur_len=_lowercase )
__UpperCAmelCase = top_k_warp(_lowercase , _lowercase , cur_len=_lowercase )
__UpperCAmelCase = top_p_warp(_lowercase , _lowercase , cur_len=_lowercase )
__UpperCAmelCase = min_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
__UpperCAmelCase = bos_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
__UpperCAmelCase = eos_dist_proc(_lowercase , _lowercase , cur_len=_lowercase )
return scores
# with processor list
def run_processor_list(_lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : int ):
__UpperCAmelCase = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__UpperCAmelCase = processor(_lowercase , _lowercase , cur_len=_lowercase )
return scores
__UpperCAmelCase = jax.jit(_lowercase )
__UpperCAmelCase = jax.jit(_lowercase )
__UpperCAmelCase = jitted_run_no_processor_list(_lowercase , _lowercase , _lowercase )
__UpperCAmelCase = jitted_run_processor_list(_lowercase , _lowercase , _lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowercase , _lowercase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
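# Standalone sketch of composing the same warpers outside the test class
# (illustrative numbers; requires a flax install):
#
# scores = jnp.log(jnp.array([[0.1, 0.2, 0.3, 0.4]]))
# chain = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7),
#                                  FlaxTopKLogitsWarper(2)])
# warped = chain(input_ids, scores, cur_len=1)  # only the top-2 logits survive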
| 49 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
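# What the lazy indirection buys (hypothetical session): importing the package
# is cheap, and the first attribute access, e.g. PoolFormerModel, triggers the
# real torch-backed import through _LazyModule.__getattr__.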
| 49 | 1 |
"""simple docstring"""
import operator as op
_lowercase : List[str] = 'scaler.pt'
_lowercase : Tuple = 'pytorch_model'
_lowercase : Tuple = 'random_states'
_lowercase : Optional[Any] = 'optimizer'
_lowercase : str = 'scheduler'
_lowercase : List[str] = 'pytorch_model.bin'
_lowercase : Dict = 'pytorch_model.bin.index.json'
_lowercase : Any = 'model.safetensors'
_lowercase : Dict = 'model.safetensors.index.json'
_lowercase : Optional[int] = '1.10.2'
_lowercase : Tuple = 'py38'
_lowercase : Dict = '4.17.0'
_lowercase : List[Any] = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
_lowercase : Optional[Any] = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
_lowercase : List[str] = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
_lowercase : List[str] = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
_lowercase : Optional[Any] = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
_lowercase : Optional[Any] = '2.0.1'
_lowercase : Tuple = ['pdsh', 'standard', 'openmpi', 'mvapich']
_lowercase : Tuple = ['default', 'reduce-overhead', 'max-autotune']
_lowercase : List[Any] = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
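# Example dispatch through the operator map above (named STR_OPERATION_TO_FUNC
# upstream; packaging.version.parse is assumed to provide comparable operands):
# STR_OPERATION_TO_FUNC[">="](parse("2.0.1"), parse("1.10.2"))  -> True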
# These are the args for `torch.distributed.launch` for pytorch < 1.9
_lowercase : Dict = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
_lowercase : int = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
_lowercase : str = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
| 49 |
"""simple docstring"""
def compute_ap(l ): # noqa: E741
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
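# For the adjacency list above, the expected articulation points printed are
# 2, 3 and 5: removing 2 separates {0, 1} from the rest, removing 3 isolates 4,
# and removing 5 cuts the 6-7-8 cycle members off from the remaining graph.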
| 49 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
_lowercase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( _lowerCAmelCase ):
    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , batch_size : int = 1 , num_inference_steps : int = 1_00 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , audio_length_in_s : Optional[float] = None , return_dict : bool = True , ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                F'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
                F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
                F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
                ''' process.''' )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio , t ).sample
            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output , t , audio ).prev_sample
        audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
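# Worked example of the length rounding above (illustrative numbers): a request
# of 65_641 samples with down_scale_factor=8 is rounded up to
# ((65_641 // 8) + 1) * 8 = 65_648 for the UNet, then trimmed back after denoising.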
| 49 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( _lowerCAmelCase ):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def a ( self : List[str] , _lowercase : List[Any]=None , _lowercase : List[str]=None , _lowercase : Any=True ):
return self.tokenizer.get_decoder_prompt_ids(task=_lowercase , language=_lowercase , no_timestamps=_lowercase )
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if text is not None:
            inputs = self.tokenizer(text , **kwargs )
        if audio is not None:
            audio_inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs['''input_values'''] = audio_inputs['''input_values''']
            if "padding_mask" in audio_inputs:
                inputs['''padding_mask'''] = audio_inputs['''padding_mask''']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        audio_values = kwargs.pop('''audio''' , None )
        padding_mask = kwargs.pop('''padding_mask''' , None )
        if len(args ) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values , padding_mask=padding_mask )
        else:
            return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    def _decode_audio( self , audio_values , padding_mask: Optional[np.ndarray] = None ):
        audio_values = to_numpy(audio_values )
        bsz , channels , seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values )
        padding_mask = to_numpy(padding_mask )
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask , ((0, 0), (0, difference)) , '''constant''' , constant_values=padding_value )
        audio_values = audio_values.tolist()
        for i in range(bsz ):
            sliced_audio = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels , -1 )
        return audio_values
| 49 | 1 |
"""simple docstring"""
import operator as op
def solve(post_fix ):
    stack = []
    div = lambda x , y : int(x / y )  # noqa: E731 integer division operation
    opr = {
        '''^''': op.pow,
        '''*''': op.mul,
        '''/''': div,
        '''+''': op.add,
        '''-''': op.sub,
    }  # operators & their respective operation
    # print table header
    print('''Symbol'''.center(8 ) , '''Action'''.center(12 ) , '''Stack''' , sep=''' | ''' )
    print('''-''' * (30 + len(post_fix )) )
    for x in post_fix:
        if x.isdigit():  # if x in digit
            stack.append(x )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ('''push(''' + x + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8 ) , ('''pop(''' + b + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8 ) , ('''pop(''' + a + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ('''push(''' + a + x + b + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' , )
    return int(stack[0] )
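# Worked example (a sketch of the printed trace) for the postfix input
# "5 6 9 * +": 5, 6 and 9 are pushed; '*' pops 9 then 6 and pushes 54;
# '+' pops 54 then 5 and pushes 59; the final stack holds ['59'], so the
# function returns 59.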
if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', solve(Postfix))
| 49 |
"""simple docstring"""
def abbr(a: str , b: str ) -> bool:
    """
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
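# How the DP works, traced on abbr("dBcd", "ABC") (a sketch): dp[i][j] is True
# when the first i chars of `a` can produce the first j chars of `b`. A
# lowercase a[i] may be deleted (dp[i+1][j]) or, after uppercasing, matched
# against b[j] (dp[i+1][j+1]). Here 'B' != 'A' and 'B' is not lowercase, so no
# cell in column 1 ever becomes True and the function returns False.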
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 | 1 |
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube(side_length: float ) -> float:
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def surface_area_cuboid(length: float , breadth: float , height: float ) -> float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float ) -> float:
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def surface_area_hemisphere(radius: float ) -> float:
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def surface_area_cone(radius: float , height: float ) -> float:
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float , radius_2: float , height: float ) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            '''surface_area_conical_frustum() only accepts non-negative values''' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float , height: float ) -> float:
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float , tube_radius: float ) -> float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
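# Quick sanity check (Pappus's theorem: a circle of circumference
# 2*pi*tube_radius swept along a path of length 2*pi*torus_radius):
# surface_area_torus(3, 1) == 4 * pi**2 * 3 * 1, roughly 118.44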
def area_rectangle(length: float , width: float ) -> float:
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def area_square(side_length: float ) -> float:
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def area_triangle(base: float , height: float ) -> float:
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def area_triangle_three_sides(side1: float , side2: float , side3: float ) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('''Given three sides do not form a triangle''' )
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3) )
    return area
def area_parallelogram(base: float , height: float ) -> float:
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def area_trapezium(base1: float , base2: float , height: float ) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError('''area_trapezium() only accepts non-negative values''' )
    return 1 / 2 * (base1 + base2) * height
def area_circle(radius: float ) -> float:
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def area_ellipse(radius_x: float , radius_y: float ) -> float:
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float , diagonal_2: float ) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('''area_rhombus() only accepts non-negative values''' )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int , length: float ) -> float:
    if not isinstance(sides , int ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 49 |
"""simple docstring"""
from collections import deque
class Process:
    def __init__( self , process_name: str , arrival_time: int , burst_time: int ):
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__( self , number_of_queues: int , time_slices: list[int] , queue: deque[Process] , current_time: int , ):
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue( self ):
        sequence = []
        for i in range(len(self.finish_queue ) ):
            sequence.append(self.finish_queue[i].process_name )
        return sequence
    def calculate_waiting_time( self , queue: list[Process] ):
        waiting_times = []
        for i in range(len(queue ) ):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times
    def calculate_turnaround_time( self , queue: list[Process] ):
        turnaround_times = []
        for i in range(len(queue ) ):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times
    def calculate_completion_time( self , queue: list[Process] ):
        completion_times = []
        for i in range(len(queue ) ):
            completion_times.append(queue[i].stop_time )
        return completion_times
    def calculate_remaining_burst_time_of_processes( self , queue: deque[Process] ):
        return [q.burst_time for q in queue]
    def update_waiting_time( self , process: Process ):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served( self , ready_queue: deque[Process] ):
        finished = deque()  # sequence deque of finished process
        while len(ready_queue ) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin( self , ready_queue: deque[Process] , time_slice: int ):
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue ) ):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp )
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp )
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue( self ):
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1 ):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue , self.time_slices[i] )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue )
        return self.finish_queue
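# Illustrative behaviour (a sketch, not a doctest from the original file): with
# time_slices=[17, 25] the first round-robin pass gives every process 17 units
# (P2 finishes there), the second pass gives the survivors 25 more units
# (P4 finishes), and the final FCFS queue drains P1 and then P3, so the finish
# sequence is P2, P4, P1, P3.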
if __name__ == "__main__":
import doctest
    P1 = Process('P1', 0, 53)
    P2 = Process('P2', 0, 17)
    P3 = Process('P3', 0, 68)
    P4 = Process('P4', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={'queue': deque([P1, P2, P3, P4])})
    P1 = Process('P1', 0, 53)
    P2 = Process('P2', 0, 17)
    P3 = Process('P3', 0, 68)
    P4 = Process('P4', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print sequence of finished processes
    print(
        f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
    )
| 49 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , num_frames=10 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , crop_size=None , ):
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp( self ):
        self.image_processor_tester = VivitImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def a ( self : Optional[int] ):
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowercase , '''image_std''' ) )
self.assertTrue(hasattr(_lowercase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowercase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowercase , '''do_center_crop''' ) )
self.assertTrue(hasattr(_lowercase , '''size''' ) )
def a ( self : int ):
__UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def a ( self : str ):
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
__UpperCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for video in video_inputs:
self.assertIsInstance(_lowercase , _lowercase )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
__UpperCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__UpperCAmelCase = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def a ( self : Dict ):
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for video in video_inputs:
self.assertIsInstance(_lowercase , _lowercase )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
__UpperCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__UpperCAmelCase = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def a ( self : List[str] ):
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for video in video_inputs:
self.assertIsInstance(_lowercase , _lowercase )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
__UpperCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__UpperCAmelCase = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 49 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig( PretrainedConfig ):
    model_type = "camembert"
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class RobertaPreLayerNormConfig( PretrainedConfig ):
    model_type = "roberta-prelayernorm"
    def __init__( self , vocab_size=5_02_65 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list , n: int ):
    # Checks if the entire collection has been sorted
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )
def insert_next(collection: list , index: int ):
    # Checks order between adjacent elements
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1] , collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
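# Worked example (a sketch): for [3, 1, 2] the first insert_next call compares
# 1 <= 2 and returns; the recursive pass then swaps 3 and 1 ([1, 3, 2]) and
# bubbles 3 past 2 ([1, 2, 3]), leaving the list fully sorted.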
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list: list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 49 | 1 |
"""simple docstring"""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def next_number(number: int ) -> int:
    sum_of_digits_squared = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
number //= 100_000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_00_00_00
CHAINS[0] = True
CHAINS[57] = False
def chain(number: int ) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10_000_000 ) -> int:
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
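# Example chain (a sketch): 44 -> 32 -> 13 -> 10 -> 1, so CHAINS[43] is True,
# while 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 loops at 89,
# so CHAINS[84] is False and 85 counts towards the Project Euler 92 total.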
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 49 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Any = StableUnCLIPPipeline
a__ : Dict = TEXT_TO_IMAGE_PARAMS
a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ : Optional[int] = False
def a ( self : List[str] ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ):
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Any ):
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def a ( self : int ):
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ):
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
        __UpperCAmelCase = pipe('''anime turtle''' , generator=_lowercase , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
def a ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 49 | 1 |
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method ):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse('''0.17.0''' ):
        return method

    def wrapper(self , *args , **kwargs ):
        if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
            self._hf_hook.pre_forward(self )
        return method(self , *args , **kwargs )

    return wrapper
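# Usage sketch (hypothetical module; mirrors how a model method would be
# decorated so that accelerate's offload hook moves weights onto the right
# device before the forward call runs):
#
#   class MyVAE(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           ...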
| 49 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
# The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
return result
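# Usage sketch (the classic Wikipedia example; values assumed, not part of the
# original file):
#
#   observations = ["normal", "cold", "dizzy"]
#   states = ["Healthy", "Fever"]
#   start_p = {"Healthy": 0.6, "Fever": 0.4}
#   trans_p = {
#       "Healthy": {"Healthy": 0.7, "Fever": 0.3},
#       "Fever": {"Healthy": 0.4, "Fever": 0.6},
#   }
#   emit_p = {
#       "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#       "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#   }
#   viterbi(observations, states, start_p, trans_p, emit_p)
#   # -> ['Healthy', 'Healthy', 'Fever']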
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('''There\'s an empty parameter''' )
def _validate_lists(observations_space: Any , states_space: Any ) -> None:
    _validate_list(observations_space , '''observations_space''' )
    _validate_list(states_space , '''states_space''' )
def _validate_list(_object: Any , var_name: str ) -> None:
    if not isinstance(_object , list ):
        msg = F'''{var_name} must be a list'''
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = F'''{var_name} must be a list of strings'''
                raise ValueError(msg )
def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities , '''initial_probabilities''' , float )
    _validate_nested_dict(transition_probabilities , '''transition_probabilities''' )
    _validate_nested_dict(emission_probabilities , '''emission_probabilities''' )
def _validate_nested_dict(_object: Any , var_name: str ) -> None:
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict(
    _object: Any , var_name: str , value_type: type , nested: bool = False ) -> None:
    if not isinstance(_object , dict ):
        msg = F'''{var_name} must be a dict'''
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = F'''{var_name} all keys must be strings'''
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = '''nested dictionary ''' if nested else ''''''
        msg = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _UpperCAmelCase :
def __init__( self : Optional[int] , _lowercase : Optional[int] , _lowercase : Optional[Any]=13 , _lowercase : Union[str, Any]=10 , _lowercase : Dict=3 , _lowercase : List[Any]=2 , _lowercase : Optional[int]=2 , _lowercase : Tuple=2 , _lowercase : Optional[int]=True , _lowercase : Dict=True , _lowercase : Optional[Any]=32 , _lowercase : str=5 , _lowercase : Any=4 , _lowercase : Optional[int]=37 , _lowercase : Optional[Any]="gelu" , _lowercase : int=0.1 , _lowercase : Dict=0.1 , _lowercase : List[str]=10 , _lowercase : Tuple=0.02 , _lowercase : List[Any]=0.9 , _lowercase : Tuple=None , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = image_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = patch_size
__UpperCAmelCase = tubelet_size
__UpperCAmelCase = num_frames
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = mask_ratio
__UpperCAmelCase = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
__UpperCAmelCase = (image_size // patch_size) ** 2
__UpperCAmelCase = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
__UpperCAmelCase = int(mask_ratio * self.seq_length )
def a ( self : Optional[Any] ):
__UpperCAmelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def a ( self : List[str] ):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , )
def a ( self : int , _lowercase : List[str] , _lowercase : str , _lowercase : Any ):
__UpperCAmelCase = VideoMAEModel(config=_lowercase )
model.to(_lowercase )
model.eval()
__UpperCAmelCase = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : List[Any] , _lowercase : Union[str, Any] , _lowercase : Any , _lowercase : Optional[int] ):
__UpperCAmelCase = VideoMAEForPreTraining(_lowercase )
model.to(_lowercase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__UpperCAmelCase = torch.ones((self.num_masks,) )
__UpperCAmelCase = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
__UpperCAmelCase = mask.expand(self.batch_size , -1 ).bool()
__UpperCAmelCase = model(_lowercase , _lowercase )
# model only returns predictions for masked patches
__UpperCAmelCase = mask.sum().item()
__UpperCAmelCase = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def a ( self : List[Any] ):
__UpperCAmelCase = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs
__UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Union[str, Any] = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
a__ : Dict = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
a__ : Any = False
a__ : int = False
a__ : int = False
a__ : Tuple = False
def a ( self : List[Any] ):
__UpperCAmelCase = VideoMAEModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def a ( self : Union[str, Any] , _lowercase : List[str] , _lowercase : int , _lowercase : Optional[Any]=False ):
__UpperCAmelCase = copy.deepcopy(_lowercase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__UpperCAmelCase = torch.ones((self.model_tester.num_masks,) )
__UpperCAmelCase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
__UpperCAmelCase = mask.expand(self.model_tester.batch_size , -1 ).bool()
__UpperCAmelCase = bool_masked_pos.to(_lowercase )
if return_labels:
if model_class in [
*get_values(_lowercase ),
]:
__UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
return inputs_dict
def a ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def a ( self : Optional[Any] ):
pass
def a ( self : Any ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def a ( self : Optional[int] ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(_lowercase )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase )
def a ( self : List[Any] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def a ( self : List[Any] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowercase )
@slow
def a ( self : Dict ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = VideoMAEModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def a ( self : Union[str, Any] ):
if not self.has_attentions:
pass
else:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = True
for model_class in self.all_model_classes:
__UpperCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks
__UpperCAmelCase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) )
__UpperCAmelCase = outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase = True
__UpperCAmelCase = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) )
__UpperCAmelCase = outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__UpperCAmelCase = len(_lowercase )
# Check attention is always last and order is fine
__UpperCAmelCase = True
__UpperCAmelCase = True
__UpperCAmelCase = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(out_len + 1 , len(_lowercase ) )
__UpperCAmelCase = outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def a ( self : Dict ):
def check_hidden_states_output(_lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : str ):
__UpperCAmelCase = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) )
__UpperCAmelCase = outputs.hidden_states
__UpperCAmelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_lowercase ) , _lowercase )
__UpperCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks
__UpperCAmelCase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a ( self : Dict ):
pass
def lowercase__ ( ):
__UpperCAmelCase = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
__UpperCAmelCase = np.load(snake_case_ )
return list(snake_case_ )
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def a ( self : Optional[Any] ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def a ( self : List[str] ):
__UpperCAmelCase = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
_lowercase )
__UpperCAmelCase = self.default_image_processor
__UpperCAmelCase = prepare_video()
__UpperCAmelCase = image_processor(_lowercase , return_tensors='''pt''' ).to(_lowercase )
# forward pass
with torch.no_grad():
__UpperCAmelCase = model(**_lowercase )
# verify the logits
__UpperCAmelCase = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , _lowercase )
__UpperCAmelCase = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1E-4 ) )
@slow
def a ( self : List[Any] ):
__UpperCAmelCase = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(_lowercase )
__UpperCAmelCase = self.default_image_processor
__UpperCAmelCase = prepare_video()
__UpperCAmelCase = image_processor(_lowercase , return_tensors='''pt''' ).to(_lowercase )
# add boolean mask, indicating which patches to mask
__UpperCAmelCase = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
__UpperCAmelCase = torch.load(_lowercase )
# forward pass
with torch.no_grad():
__UpperCAmelCase = model(**_lowercase )
# verify the logits
__UpperCAmelCase = torch.Size([1, 14_08, 15_36] )
__UpperCAmelCase = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=_lowercase )
self.assertEqual(outputs.logits.shape , _lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _lowercase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
__UpperCAmelCase = torch.tensor([0.5_142] , device=_lowercase )
self.assertTrue(torch.allclose(outputs.loss , _lowercase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
__UpperCAmelCase = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=_lowercase ).to(
_lowercase )
with torch.no_grad():
__UpperCAmelCase = model(**_lowercase )
        __UpperCAmelCase = torch.tensor([0.6_469] , device=_lowercase )
self.assertTrue(torch.allclose(outputs.loss , _lowercase , atol=1E-4 ) )
| 49 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'yjernite/retribert-base-uncased': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class RetriBertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars
):
__UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = strip_accents
__UpperCAmelCase = tokenize_chinese_chars
__UpperCAmelCase = normalizer_class(**_lowercase )
__UpperCAmelCase = do_lower_case
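# build model inputs by adding special tokens: [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair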
def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ):
__UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_b:
output += token_ids_b + [self.sep_token_id]
return output
def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
__UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 49 | 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowercase : int = logging.get_logger(__name__)
_lowercase : Any = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Optional[Any] = "conditional_detr"
a__ : Tuple = ["past_key_values"]
a__ : Optional[Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Dict , _lowercase : Optional[int]=True , _lowercase : Union[str, Any]=None , _lowercase : Union[str, Any]=3 , _lowercase : Optional[Any]=3_00 , _lowercase : Tuple=6 , _lowercase : Optional[int]=20_48 , _lowercase : List[str]=8 , _lowercase : Dict=6 , _lowercase : Any=20_48 , _lowercase : str=8 , _lowercase : Any=0.0 , _lowercase : Optional[int]=0.0 , _lowercase : Tuple=True , _lowercase : Optional[Any]="relu" , _lowercase : str=2_56 , _lowercase : List[Any]=0.1 , _lowercase : List[Any]=0.0 , _lowercase : str=0.0 , _lowercase : Optional[int]=0.02 , _lowercase : Tuple=1.0 , _lowercase : List[str]=False , _lowercase : Tuple="sine" , _lowercase : Any="resnet50" , _lowercase : Tuple=True , _lowercase : Optional[Any]=False , _lowercase : Dict=2 , _lowercase : str=5 , _lowercase : Union[str, Any]=2 , _lowercase : List[str]=1 , _lowercase : Union[str, Any]=1 , _lowercase : Tuple=2 , _lowercase : str=5 , _lowercase : Optional[Any]=2 , _lowercase : Optional[int]=0.25 , **_lowercase : Tuple , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__UpperCAmelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_lowercase , _lowercase ):
__UpperCAmelCase = backbone_config.get('''model_type''' )
__UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase = config_class.from_dict(_lowercase )
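# copy the remaining architecture hyperparameters onto the config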
__UpperCAmelCase = use_timm_backbone
__UpperCAmelCase = backbone_config
__UpperCAmelCase = num_channels
__UpperCAmelCase = num_queries
__UpperCAmelCase = d_model
__UpperCAmelCase = encoder_ffn_dim
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = encoder_attention_heads
__UpperCAmelCase = decoder_ffn_dim
__UpperCAmelCase = decoder_layers
__UpperCAmelCase = decoder_attention_heads
__UpperCAmelCase = dropout
__UpperCAmelCase = attention_dropout
__UpperCAmelCase = activation_dropout
__UpperCAmelCase = activation_function
__UpperCAmelCase = init_std
__UpperCAmelCase = init_xavier_std
__UpperCAmelCase = encoder_layerdrop
__UpperCAmelCase = decoder_layerdrop
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = auxiliary_loss
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = backbone
__UpperCAmelCase = use_pretrained_backbone
__UpperCAmelCase = dilation
# Hungarian matcher
__UpperCAmelCase = class_cost
__UpperCAmelCase = bbox_cost
__UpperCAmelCase = giou_cost
# Loss coefficients
__UpperCAmelCase = mask_loss_coefficient
__UpperCAmelCase = dice_loss_coefficient
__UpperCAmelCase = cls_loss_coefficient
__UpperCAmelCase = bbox_loss_coefficient
__UpperCAmelCase = giou_loss_coefficient
__UpperCAmelCase = focal_alpha
super().__init__(is_encoder_decoder=_lowercase , **_lowercase )
@property
def a ( self : Tuple ):
return self.encoder_attention_heads
@property
def a ( self : Optional[int] ):
return self.d_model
def a ( self : Any ):
__UpperCAmelCase = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
__UpperCAmelCase = self.backbone_config.to_dict()
__UpperCAmelCase = self.__class__.model_type
return output
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Optional[Any] = version.parse("1.11" )
@property
def a ( self : Tuple ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def a ( self : Optional[Any] ):
return 1E-5
@property
def a ( self : Optional[int] ):
return 12
| 49 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_lowercase : Dict = 'bart'
_lowercase : Dict = True
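# load the dense retriever and the BART/T5 answer-generation models once; streamlit caches them across reruns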
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
__UpperCAmelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
__UpperCAmelCase = qar_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
if MODEL_TYPE == "bart":
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
__UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
__UpperCAmelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
__UpperCAmelCase = sas_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
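# build the dense faiss index over precomputed Wikipedia passage embeddings, plus an ElasticSearch client for sparse retrieval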
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = faiss.StandardGpuResources()
__UpperCAmelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
__UpperCAmelCase = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
__UpperCAmelCase = faiss.index_cpu_to_gpu(snake_case_ , 1 , snake_case_ )
wikiaab_gpu_index_flat.add(snake_case_ ) # TODO fix for larger GPU
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
__UpperCAmelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
__UpperCAmelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
__UpperCAmelCase = elia['''train_eli5''']
__UpperCAmelCase = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(snake_case_ )
return (elia_train, eli5_train_q_index)
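# load indexes, models and ELI5 training data once at startup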
_lowercase ,_lowercase ,_lowercase : Dict = load_indexes()
_lowercase ,_lowercase ,_lowercase ,_lowercase : Dict = load_models()
_lowercase ,_lowercase : Tuple = load_train_data()
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Any=10 ):
__UpperCAmelCase = embed_questions_for_retrieval([question] , snake_case_ , snake_case_ )
__UpperCAmelCase , __UpperCAmelCase = eli5_train_q_index.search(snake_case_ , snake_case_ )
__UpperCAmelCase = [elia_train[int(snake_case_ )] for i in I[0]]
return nn_examples
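# retrieve supporting passages via the dense faiss index or the sparse ElasticSearch index and build the seq2seq input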
def lowercase__ ( snake_case_ :Any , snake_case_ :Dict="wiki40b" , snake_case_ :str="dense" , snake_case_ :Union[str, Any]=10 ):
if source == "none":
__UpperCAmelCase , __UpperCAmelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__UpperCAmelCase , __UpperCAmelCase = query_qa_dense_index(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
__UpperCAmelCase , __UpperCAmelCase = query_es_index(
snake_case_ , snake_case_ , index_name='''english_wiki40b_snippets_100w''' , n_results=snake_case_ , )
__UpperCAmelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__UpperCAmelCase = '''question: {} context: {}'''.format(snake_case_ , snake_case_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None),
} )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] , snake_case_ :str , snake_case_ :List[Any]=64 , snake_case_ :Optional[int]=256 , snake_case_ :List[Any]=False , snake_case_ :Optional[Any]=2 , snake_case_ :Optional[Any]=0.95 , snake_case_ :List[Any]=0.8 ):
with torch.no_grad():
__UpperCAmelCase = qa_sas_generate(
snake_case_ , snake_case_ , snake_case_ , num_answers=1 , num_beams=snake_case_ , min_len=snake_case_ , max_len=snake_case_ , do_sample=snake_case_ , temp=snake_case_ , top_p=snake_case_ , top_k=snake_case_ , max_input_length=1_024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
_lowercase : Dict = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
_lowercase : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowercase : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
_lowercase : str = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
_lowercase : Optional[int] = st.sidebar.checkbox('Demo options')
if demo_options:
_lowercase : Tuple = st.sidebar.selectbox(
'',
action_list,
index=3,
)
_lowercase : List[str] = action_list.index(action_st)
_lowercase : str = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
_lowercase : int = show_type == 'Show full text of passages'
else:
_lowercase : str = 3
_lowercase : List[Any] = True
_lowercase : Optional[int] = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
_lowercase : Any = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
_lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
_lowercase : List[str] = 'wiki40b'
_lowercase : Optional[int] = 'dense'
_lowercase : List[Any] = 'beam'
_lowercase : str = 2
_lowercase : Optional[int] = 64
_lowercase : Union[str, Any] = 2_56
_lowercase : List[str] = None
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = st.sidebar.checkbox('Generation options')
if generate_options:
_lowercase : Tuple = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
_lowercase : Optional[int] = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
_lowercase : Optional[Any] = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
_lowercase : str = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowercase : List[Any] = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowercase : Dict = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowercase : Union[str, Any] = None
# start main text
_lowercase : Optional[int] = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
_lowercase : Optional[int] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_lowercase : Optional[Any] = st.text_input('Enter your question here:', '')
else:
_lowercase : int = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowercase ,_lowercase : Any = make_support(question, source=wiki_source, method='dense', n_results=10)
_lowercase ,_lowercase : Union[str, Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
_lowercase : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowercase : Any = support_list[:10]
_lowercase : Tuple = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
_lowercase ,_lowercase : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_lowercase ,_lowercase : Union[str, Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
_lowercase : int = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
_lowercase : Any = res[1].strip()
if sec_titles == "":
_lowercase : Dict = '[{}]({})'.format(res[0], wiki_url)
else:
_lowercase : List[Any] = sec_titles.split(' & ')
_lowercase : int = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
_lowercase : List[Any] = find_nearest_training(question)
_lowercase : Tuple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
_lowercase : int = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
_lowercase : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 49 | 1 |
"""simple docstring"""
import re
def lowercase__ ( snake_case_ :str ):
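# matches Indian mobile numbers: optional +91 / 91 / 0 prefix, then 10 digits starting with 7, 8 or 9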
__UpperCAmelCase = re.compile(r'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
if match := re.search(pat , phone ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
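# additional illustrative checks; the expected values follow from the pattern above
print(indian_phone_validator('9876543210')) # True: bare 10-digit mobile number
print(indian_phone_validator('123-456')) # False: wrong prefix and length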
| 49 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[str] = CycleDiffusionPipeline
a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
a__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a ( self : Optional[int] ):
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCAmelCase = CLIPTextModel(_lowercase )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
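# minimal text-guided image-to-image inputs for the fast tests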
def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ):
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
__UpperCAmelCase = image / 2 + 0.5
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Optional[int] ):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowercase , '''half''' ):
__UpperCAmelCase = module.half()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a ( self : Tuple ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def a ( self : List[str] ):
return super().test_inference_batch_single_identical()
@skip_mps
def a ( self : int ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self : int ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(
_lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def a ( self : Optional[Any] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 49 | 1 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
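# shared assertions for datasets read from the 4-row, 3-column JSON fixtures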
def lowercase__ ( snake_case_ :Dict , snake_case_ :int ):
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
if issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = jsonl_path
elif issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ):
assert isinstance(snake_case_ , snake_case_ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ):
if split:
__UpperCAmelCase = {split: jsonl_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowercase__ ( snake_case_ :Optional[int] ):
return json.load(snake_case_ )
def lowercase__ ( snake_case_ :Any ):
return [json.loads(line ) for line in buffer]
class _UpperCAmelCase :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
def a ( self : int , _lowercase : Any ):
with pytest.raises(_lowercase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
__UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
assert exported_content == original_content
| 49 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'}
_lowercase : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
_lowercase : List[str] = {
'google/rembert': 2_56,
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Union[str, Any] = VOCAB_FILES_NAMES
a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Tuple=True , _lowercase : str=True , _lowercase : str="[CLS]" , _lowercase : Dict="[SEP]" , _lowercase : Union[str, Any]="[UNK]" , _lowercase : Any="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : Tuple="[CLS]" , _lowercase : Optional[Any]="[MASK]" , **_lowercase : str , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = remove_space
__UpperCAmelCase = keep_accents
__UpperCAmelCase = vocab_file
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def a ( self : int ):
return len(self.sp_model )
def a ( self : Tuple ):
__UpperCAmelCase = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
__UpperCAmelCase = self.__dict__.copy()
__UpperCAmelCase = None
return state
def __setstate__( self : Tuple , _lowercase : str ):
__UpperCAmelCase = d
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any]=False ):
__UpperCAmelCase = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def a ( self : int , _lowercase : List[str] ):
return self.sp_model.PieceToId(_lowercase )
def a ( self : List[str] , _lowercase : str ):
return self.sp_model.IdToPiece(_lowercase )
def a ( self : Any , _lowercase : Dict ):
__UpperCAmelCase = self.sp_model.decode_pieces(_lowercase )
return out_string
def a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_b is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_b + sep
def a ( self : Optional[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_b is not None:
return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
return [1] + ([0] * len(token_ids_a )) + [1]
def a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
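# persist the sentencepiece model file alongside the saved vocabulary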
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowercase ) )
return
__UpperCAmelCase = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 49 | 1 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : List[str] = '▁'
_lowercase : Optional[int] = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
_lowercase : str = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
_lowercase : Optional[Any] = {
'facebook/s2t-small-librispeech-asr': 10_24,
}
_lowercase : Optional[Any] = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
_lowercase : Union[str, Any] = {'mustc': MUSTC_LANGS}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Optional[Any] = VOCAB_FILES_NAMES
a__ : str = PRETRAINED_VOCAB_FILES_MAP
a__ : Any = MAX_MODEL_INPUT_SIZES
a__ : int = ["input_ids", "attention_mask"]
a__ : List[int] = []
def __init__( self : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : Optional[Any]="<s>" , _lowercase : str="</s>" , _lowercase : List[Any]="<pad>" , _lowercase : Tuple="<unk>" , _lowercase : Union[str, Any]=False , _lowercase : Union[str, Any]=False , _lowercase : List[Any]=None , _lowercase : int=None , _lowercase : Optional[Dict[str, Any]] = None , **_lowercase : List[Any] , ):
__UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , do_upper_case=_lowercase , do_lower_case=_lowercase , tgt_lang=_lowercase , lang_codes=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
__UpperCAmelCase = do_upper_case
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = load_json(_lowercase )
__UpperCAmelCase = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase = spm_file
__UpperCAmelCase = load_spm(_lowercase , self.sp_model_kwargs )
if lang_codes is not None:
__UpperCAmelCase = lang_codes
__UpperCAmelCase = LANGUAGES[lang_codes]
__UpperCAmelCase = [F'''<lang:{lang}>''' for lang in self.langs]
__UpperCAmelCase = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs}
__UpperCAmelCase = self.lang_tokens
__UpperCAmelCase = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__UpperCAmelCase = {}
@property
def a ( self : List[str] ):
return len(self.encoder )
@property
def a ( self : str ):
return self._tgt_lang
@tgt_lang.setter
def a ( self : Optional[int] , _lowercase : Optional[int] ):
__UpperCAmelCase = new_tgt_lang
self.set_tgt_lang_special_tokens(_lowercase )
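# decoder inputs are prefixed with the token of the current target language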
def a ( self : Any , _lowercase : str ):
__UpperCAmelCase = self.lang_code_to_id[tgt_lang]
__UpperCAmelCase = [lang_code_id]
def a ( self : str , _lowercase : str ):
return self.sp_model.encode(_lowercase , out_type=_lowercase )
def a ( self : List[Any] , _lowercase : Tuple ):
return self.encoder.get(_lowercase , self.encoder[self.unk_token] )
def a ( self : List[Any] , _lowercase : int ):
return self.decoder.get(_lowercase , self.unk_token )
def a ( self : Dict , _lowercase : List[str] ):
__UpperCAmelCase = []
__UpperCAmelCase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__UpperCAmelCase = self.sp_model.decode(_lowercase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__UpperCAmelCase = []
else:
current_sub_tokens.append(_lowercase )
__UpperCAmelCase = self.sp_model.decode(_lowercase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def a ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Any=None ):
if token_ids_b is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_b + [self.eos_token_id]
def a ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
__UpperCAmelCase = [1] * len(self.prefix_tokens )
__UpperCAmelCase = [1]
if token_ids_b is None:
return prefix_ones + ([0] * len(token_ids_a )) + suffix_ones
return prefix_ones + ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones
def a ( self : Any ):
__UpperCAmelCase = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
__UpperCAmelCase = self.__dict__.copy()
__UpperCAmelCase = None
return state
def __setstate__( self : Any , _lowercase : Dict ):
__UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__UpperCAmelCase = {}
__UpperCAmelCase = load_spm(self.spm_file , self.sp_model_kwargs )
def a ( self : Dict , _lowercase : str , _lowercase : Optional[str] = None ):
__UpperCAmelCase = Path(_lowercase )
assert save_dir.is_dir(), F'''{save_directory} should be a directory'''
__UpperCAmelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
__UpperCAmelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , _lowercase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _lowercase )
elif not os.path.isfile(self.spm_file ):
with open(_lowercase , '''wb''' ) as fi:
__UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (str(_lowercase ), str(_lowercase ))
def lowercase__ ( snake_case_ :str , snake_case_ :Dict[str, Any] ):
__UpperCAmelCase = sentencepiece.SentencePieceProcessor(**snake_case_ )
spm.Load(str(snake_case_ ) )
return spm
def lowercase__ ( snake_case_ :str ):
with open(snake_case_ , '''r''' ) as f:
return json.load(snake_case_ )
def lowercase__ ( snake_case_ :Dict , snake_case_ :str ):
with open(snake_case_ , '''w''' ) as f:
json.dump(snake_case_ , snake_case_ , indent=2 )
| 49 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
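# public import structure; the heavy torch/vision modules are only imported on first access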
_lowercase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase : Optional[Any] = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
_lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : List[Any] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
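# validate argument combinations before any expensive work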
def lowercase__ ( snake_case_ :Union[str, Any] ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
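# the two helpers below freeze selected student embedding weights so they are not updated during distillation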
def lowercase__ ( snake_case_ :int , snake_case_ :Dict ):
if args.student_type == "roberta":
__UpperCAmelCase = False
elif args.student_type == "gpt2":
__UpperCAmelCase = False
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Union[str, Any] ):
if args.student_type == "roberta":
__UpperCAmelCase = False
def lowercase__ ( ):
__UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the distillation softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
        '''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter that up-weights rarer tokens (see XLM; similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
    parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of passes over the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' )
__UpperCAmelCase = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ )
__UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__UpperCAmelCase = special_tok_ids
__UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
__UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
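        # Masking weights follow count ** (-mlm_smoothing): rarer tokens get a proportionally higher masking probability (word2vec-style smoothing).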
for idx in special_tok_ids.values():
__UpperCAmelCase = 0.0 # do not predict special tokens
__UpperCAmelCase = torch.from_numpy(snake_case_ )
else:
__UpperCAmelCase = None
__UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
__UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
__UpperCAmelCase = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__UpperCAmelCase = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 49 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[int] = logging.get_logger(__name__)
_lowercase : int = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : List[Any] = "vit_msn"
def __init__( self : Union[str, Any] , _lowercase : List[str]=7_68 , _lowercase : Any=12 , _lowercase : str=12 , _lowercase : str=30_72 , _lowercase : Dict="gelu" , _lowercase : str=0.0 , _lowercase : Union[str, Any]=0.0 , _lowercase : List[str]=0.02 , _lowercase : Any=1E-06 , _lowercase : str=2_24 , _lowercase : List[Any]=16 , _lowercase : Optional[int]=3 , _lowercase : List[Any]=True , **_lowercase : Optional[int] , ):
super().__init__(**_lowercase )
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = image_size
__UpperCAmelCase = patch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = qkv_bias
| 49 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_lowercase : Any = logging.get_logger(__name__)
def lowercase__ ( snake_case_ :Union[tf.Tensor, np.ndarray] ):
if isinstance(snake_case_ , np.ndarray ):
return list(tensor.shape )
__UpperCAmelCase = tf.shape(snake_case_ )
if tensor.shape == tf.TensorShape(snake_case_ ):
return dynamic
__UpperCAmelCase = tensor.shape.as_list()
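    # Prefer statically known dimensions; fall back to the dynamic shape tensor for axes that are None at trace time.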
return [dynamic[i] if s is None else s for i, s in enumerate(snake_case_ )]
def lowercase__ ( snake_case_ :tf.Tensor , snake_case_ :Optional[int] = None , snake_case_ :Optional[str] = None ):
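    # The 1e-9 added to the logits is numerically negligible; in upstream Transformers this "stable" softmax reportedly works around a TF/XLA issue on CPU.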
return tf.nn.softmax(logits=logits + 1E-9 , axis=snake_case_ , name=snake_case_ )
def lowercase__ ( snake_case_ :List[Any] , snake_case_ :Any , snake_case_ :Optional[Any] , snake_case_ :List[Any]=1E-5 , snake_case_ :Optional[Any]=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(snake_case_ , snake_case_ ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
__UpperCAmelCase , __UpperCAmelCase = tf.nn.moments(snake_case_ , axes=[axis] , keepdims=snake_case_ )
if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, with size-1
        # dimensions on every axis except `axis`
__UpperCAmelCase = [1] * inputs.shape.rank
__UpperCAmelCase = shape_list(snake_case_ )[axis]
__UpperCAmelCase = tf.reshape(snake_case_ , snake_case_ )
__UpperCAmelCase = tf.reshape(snake_case_ , snake_case_ )
# Compute layer normalization using the batch_normalization
# function.
__UpperCAmelCase = tf.nn.batch_normalization(
snake_case_ , snake_case_ , snake_case_ , offset=snake_case_ , scale=snake_case_ , variance_epsilon=snake_case_ , )
return outputs
def lowercase__ ( snake_case_ :Any , snake_case_ :str=0 , snake_case_ :Union[str, Any]=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
__UpperCAmelCase = tf.shape(snake_case_ )
__UpperCAmelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
__UpperCAmelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :tf.Tensor ):
if not isinstance(snake_case_ , tf.Tensor ):
__UpperCAmelCase = tf.convert_to_tensor(snake_case_ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
__UpperCAmelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
__UpperCAmelCase = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
__UpperCAmelCase = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def lowercase__ ( snake_case_ :tf.Tensor , snake_case_ :int , snake_case_ :str = "input_ids" ):
tf.debugging.assert_less(
snake_case_ , tf.cast(snake_case_ , dtype=tensor.dtype ) , message=(
F'''The maximum value of {tensor_name} ({tf.math.reduce_max(snake_case_ )}) must be smaller than the embedding '''
F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :int , snake_case_ :str ):
__UpperCAmelCase = 64_512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
__UpperCAmelCase = [x for x in data if len(snake_case_ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
F'''bytes: {bad_attributes}''' )
__UpperCAmelCase = np.asarray(snake_case_ )
__UpperCAmelCase = 1
__UpperCAmelCase = np.array_split(snake_case_ , snake_case_ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
__UpperCAmelCase = np.array_split(snake_case_ , snake_case_ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(snake_case_ ):
__UpperCAmelCase = chunk_data
else:
__UpperCAmelCase = data
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Dict ):
if name in group.attrs:
__UpperCAmelCase = [n.decode('''utf8''' ) if hasattr(snake_case_ , '''decode''' ) else n for n in group.attrs[name]]
else:
__UpperCAmelCase = []
__UpperCAmelCase = 0
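    # Reassemble attribute lists that the saver above split into numbered chunks (name0, name1, ...).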
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(snake_case_ , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def lowercase__ ( snake_case_ :Dict ):
def _expand_single_ad_tensor(snake_case_ :Tuple ):
if isinstance(snake_case_ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(snake_case_ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , snake_case_ )
| 49 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowercase : Union[str, Any] = logging.getLogger(__name__)
_lowercase : Optional[Any] = 'Hello world! cécé herlolip'
_lowercase : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowercase__ ( snake_case_ :Any , snake_case_ :int ):
__UpperCAmelCase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , )
    __UpperCAmelCase = torch.load(snake_case_ , lambda storage , loc : storage )
__UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ )
original.eval()
__UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
__UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__UpperCAmelCase = encoder_input_ids
__UpperCAmelCase = decoder_input_ids
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process, we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = original.generator(snake_case_ )
__UpperCAmelCase = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = new_model.generator(snake_case_ )
__UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between model outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
        logging.info('''all outputs are equal up to 1e-3''' )
    else:
        raise ValueError('''the outputs are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowercase : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 49 | 1 |
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : Optional[Any] = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Tuple = "autoformer"
a__ : Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Tuple , _lowercase : Optional[int] = None , _lowercase : Optional[int] = None , _lowercase : str = "student_t" , _lowercase : str = "nll" , _lowercase : int = 1 , _lowercase : List[int] = [1, 2, 3, 4, 5, 6, 7] , _lowercase : bool = True , _lowercase : int = 0 , _lowercase : int = 0 , _lowercase : int = 0 , _lowercase : int = 0 , _lowercase : Optional[List[int]] = None , _lowercase : Optional[List[int]] = None , _lowercase : int = 64 , _lowercase : int = 2 , _lowercase : int = 2 , _lowercase : int = 2 , _lowercase : int = 2 , _lowercase : int = 32 , _lowercase : int = 32 , _lowercase : str = "gelu" , _lowercase : float = 0.1 , _lowercase : float = 0.1 , _lowercase : float = 0.1 , _lowercase : float = 0.1 , _lowercase : float = 0.1 , _lowercase : int = 1_00 , _lowercase : float = 0.02 , _lowercase : bool = True , _lowercase : Optional[Any]=True , _lowercase : int = 10 , _lowercase : int = 25 , _lowercase : int = 3 , **_lowercase : List[Any] , ):
# time series specific configuration
__UpperCAmelCase = prediction_length
__UpperCAmelCase = context_length if context_length is not None else prediction_length
__UpperCAmelCase = distribution_output
__UpperCAmelCase = loss
__UpperCAmelCase = input_size
__UpperCAmelCase = num_time_features
__UpperCAmelCase = lags_sequence
__UpperCAmelCase = scaling
__UpperCAmelCase = num_dynamic_real_features
__UpperCAmelCase = num_static_real_features
__UpperCAmelCase = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(_lowercase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
__UpperCAmelCase = cardinality
else:
__UpperCAmelCase = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(_lowercase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
__UpperCAmelCase = embedding_dimension
else:
__UpperCAmelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__UpperCAmelCase = num_parallel_samples
# Transformer architecture configuration
__UpperCAmelCase = input_size * len(self.lags_sequence ) + self._number_of_features
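        # Transformer input dimension: one copy of each input channel per lag, plus the time/static covariate features.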
__UpperCAmelCase = d_model
__UpperCAmelCase = encoder_attention_heads
__UpperCAmelCase = decoder_attention_heads
__UpperCAmelCase = encoder_ffn_dim
__UpperCAmelCase = decoder_ffn_dim
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = decoder_layers
__UpperCAmelCase = dropout
__UpperCAmelCase = attention_dropout
__UpperCAmelCase = activation_dropout
__UpperCAmelCase = encoder_layerdrop
__UpperCAmelCase = decoder_layerdrop
__UpperCAmelCase = activation_function
__UpperCAmelCase = init_std
__UpperCAmelCase = use_cache
# Autoformer
__UpperCAmelCase = label_length
__UpperCAmelCase = moving_average
__UpperCAmelCase = autocorrelation_factor
super().__init__(is_encoder_decoder=_lowercase , **_lowercase )
@property
def a ( self : int ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 49 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def a ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a ( self : Dict ):
__UpperCAmelCase = ort.SessionOptions()
__UpperCAmelCase = False
return options
def a ( self : Any ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self : Optional[int] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 49 | 1 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def lowercase__ ( snake_case_ :int ):
if not isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = F'''Input value of [number={number}] must be an integer'''
raise TypeError(snake_case_ )
if number < 1:
raise ValueError('''Input must be a positive integer''' )
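    # Parity of Omega(n), the count of prime factors with multiplicity: -1 if odd, +1 if even (the Liouville function), assuming prime_factors returns repeated factors.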
return -1 if len(prime_factors(snake_case_ ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase__ ( snake_case_ :Dict , snake_case_ :int ):
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
if issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = jsonl_path
elif issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ):
assert isinstance(snake_case_ , snake_case_ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ):
if split:
__UpperCAmelCase = {split: jsonl_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowercase__ ( snake_case_ :Optional[int] ):
return json.load(snake_case_ )
def lowercase__ ( snake_case_ :Any ):
    return [json.loads(line ) for line in buffer]
class _UpperCAmelCase :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
def a ( self : int , _lowercase : Any ):
with pytest.raises(_lowercase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
__UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
assert exported_content == original_content
| 49 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Union[str, Any] = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
_lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Union[str, Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
__UpperCAmelCase = TextIteratorStreamer(_lowercase )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
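        # generate() runs in a background thread while this thread drains the streamer as an iterator.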
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowercase , _lowercase )
def a ( self : str ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
__UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Tuple ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase )
model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
__UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a ( self : Tuple ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
        # The streamer will time out after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowercase ):
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
| 49 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'}
_lowercase : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
_lowercase : List[str] = {
'google/rembert': 2_56,
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Union[str, Any] = VOCAB_FILES_NAMES
a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Tuple=True , _lowercase : str=True , _lowercase : str="[CLS]" , _lowercase : Dict="[SEP]" , _lowercase : Union[str, Any]="[UNK]" , _lowercase : Any="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : Tuple="[CLS]" , _lowercase : Optional[Any]="[MASK]" , **_lowercase : str , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = remove_space
__UpperCAmelCase = keep_accents
__UpperCAmelCase = vocab_file
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def a ( self : int ):
return len(self.sp_model )
def a ( self : Tuple ):
__UpperCAmelCase = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
__UpperCAmelCase = self.__dict__.copy()
__UpperCAmelCase = None
return state
def __setstate__( self : Tuple , _lowercase : str ):
__UpperCAmelCase = d
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any]=False ):
__UpperCAmelCase = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def a ( self : int , _lowercase : List[str] ):
return self.sp_model.PieceToId(_lowercase )
def a ( self : List[str] , _lowercase : str ):
return self.sp_model.IdToPiece(_lowercase )
def a ( self : Any , _lowercase : Dict ):
__UpperCAmelCase = self.sp_model.decode_pieces(_lowercase )
return out_string
def a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a ( self : Optional[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowercase ) )
return
__UpperCAmelCase = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 49 |
"""simple docstring"""
def lowercase__ ( snake_case_ :float , snake_case_ :float ):
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
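    # Newton-Laplace relation: c = sqrt(K / rho). Illustrative approximate check:
    # water (K ~ 2.15e9 Pa, rho ~ 998 kg/m^3) gives c ~ 1468 m/s.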
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 | 1 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class _UpperCAmelCase :
def __init__( self : Dict ):
__UpperCAmelCase = {}
def a ( self : Any , _lowercase : Any , _lowercase : str , _lowercase : List[str]=1 ):
if self.graph.get(_lowercase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
__UpperCAmelCase = [[w, v]]
if not self.graph.get(_lowercase ):
__UpperCAmelCase = []
def a ( self : Tuple ):
return list(self.graph )
def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Union[str, Any] ):
if self.graph.get(_lowercase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_lowercase )
def a ( self : List[Any] , _lowercase : Optional[Any]=-2 , _lowercase : int=-1 ):
if s == d:
return []
__UpperCAmelCase = []
__UpperCAmelCase = []
if s == -2:
__UpperCAmelCase = list(self.graph )[0]
stack.append(_lowercase )
visited.append(_lowercase )
__UpperCAmelCase = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_lowercase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_lowercase ) != 0:
__UpperCAmelCase = stack[len(_lowercase ) - 1]
else:
__UpperCAmelCase = ss
            # check if we have reached the starting point
if len(_lowercase ) == 0:
return visited
def a ( self : int , _lowercase : str=-1 ):
if c == -1:
__UpperCAmelCase = floor(random() * 1_00_00 ) + 10
for i in range(_lowercase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_02 ) + 1 ):
__UpperCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(_lowercase , _lowercase , 1 )
def a ( self : str , _lowercase : int=-2 ):
__UpperCAmelCase = deque()
__UpperCAmelCase = []
if s == -2:
__UpperCAmelCase = list(self.graph )[0]
d.append(_lowercase )
visited.append(_lowercase )
while d:
__UpperCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def a ( self : Union[str, Any] , _lowercase : Optional[Any] ):
__UpperCAmelCase = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def a ( self : int , _lowercase : str ):
return len(self.graph[u] )
def a ( self : Optional[Any] , _lowercase : Tuple=-2 ):
__UpperCAmelCase = []
__UpperCAmelCase = []
if s == -2:
__UpperCAmelCase = list(self.graph )[0]
stack.append(_lowercase )
visited.append(_lowercase )
__UpperCAmelCase = s
__UpperCAmelCase = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_lowercase ) != 0:
__UpperCAmelCase = stack[len(_lowercase ) - 1]
else:
__UpperCAmelCase = ss
            # check if we have reached the starting point
if len(_lowercase ) == 0:
return sorted_nodes
def a ( self : Optional[int] ):
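        # Iterative DFS that collects every node re-reached through a back edge, i.e. the nodes that lie on some cycle.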
__UpperCAmelCase = []
__UpperCAmelCase = []
__UpperCAmelCase = list(self.graph )[0]
stack.append(_lowercase )
visited.append(_lowercase )
__UpperCAmelCase = -2
__UpperCAmelCase = []
__UpperCAmelCase = s
__UpperCAmelCase = False
__UpperCAmelCase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__UpperCAmelCase = len(_lowercase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__UpperCAmelCase = True
if len(_lowercase ) != 0:
__UpperCAmelCase = stack[len(_lowercase ) - 1]
else:
__UpperCAmelCase = False
indirect_parents.append(_lowercase )
__UpperCAmelCase = s
__UpperCAmelCase = ss
            # check if we have reached the starting point
if len(_lowercase ) == 0:
return list(_lowercase )
def a ( self : Dict ):
__UpperCAmelCase = []
__UpperCAmelCase = []
__UpperCAmelCase = list(self.graph )[0]
stack.append(_lowercase )
visited.append(_lowercase )
__UpperCAmelCase = -2
__UpperCAmelCase = []
__UpperCAmelCase = s
__UpperCAmelCase = False
__UpperCAmelCase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__UpperCAmelCase = len(_lowercase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__UpperCAmelCase = True
if len(_lowercase ) != 0:
__UpperCAmelCase = stack[len(_lowercase ) - 1]
else:
__UpperCAmelCase = False
indirect_parents.append(_lowercase )
__UpperCAmelCase = s
__UpperCAmelCase = ss
            # check if we have reached the starting point
if len(_lowercase ) == 0:
return False
def a ( self : str , _lowercase : str=-2 , _lowercase : int=-1 ):
__UpperCAmelCase = time()
self.dfs(_lowercase , _lowercase )
__UpperCAmelCase = time()
return end - begin
def a ( self : Optional[int] , _lowercase : Optional[Any]=-2 ):
__UpperCAmelCase = time()
self.bfs(_lowercase )
__UpperCAmelCase = time()
return end - begin
class _UpperCAmelCase :
def __init__( self : Optional[int] ):
__UpperCAmelCase = {}
def a ( self : Any , _lowercase : str , _lowercase : int , _lowercase : str=1 ):
        # check if u already exists
if self.graph.get(_lowercase ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
__UpperCAmelCase = [[w, v]]
# add the other way
if self.graph.get(_lowercase ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
__UpperCAmelCase = [[w, u]]
def a ( self : Any , _lowercase : Optional[Any] , _lowercase : List[str] ):
if self.graph.get(_lowercase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_lowercase )
# the other way round
if self.graph.get(_lowercase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_lowercase )
def a ( self : List[str] , _lowercase : Dict=-2 , _lowercase : Dict=-1 ):
if s == d:
return []
__UpperCAmelCase = []
__UpperCAmelCase = []
if s == -2:
__UpperCAmelCase = list(self.graph )[0]
stack.append(_lowercase )
visited.append(_lowercase )
__UpperCAmelCase = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_lowercase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
__UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_lowercase ) != 0:
__UpperCAmelCase = stack[len(_lowercase ) - 1]
else:
__UpperCAmelCase = ss
            # check if we have reached the starting point
if len(_lowercase ) == 0:
return visited
def a ( self : Union[str, Any] , _lowercase : Optional[int]=-1 ):
if c == -1:
__UpperCAmelCase = floor(random() * 1_00_00 ) + 10
for i in range(_lowercase ):
            # every vertex gets a random number of edges (at most 102)
for _ in range(floor(random() * 1_02 ) + 1 ):
__UpperCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(_lowercase , _lowercase , 1 )
def a ( self : Tuple , _lowercase : Union[str, Any]=-2 ):
__UpperCAmelCase = deque()
__UpperCAmelCase = []
if s == -2:
__UpperCAmelCase = list(self.graph )[0]
d.append(_lowercase )
visited.append(_lowercase )
while d:
__UpperCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def a ( self : Optional[int] , _lowercase : int ):
return len(self.graph[u] )
def a ( self : List[str] ):
__UpperCAmelCase = []
__UpperCAmelCase = []
__UpperCAmelCase = list(self.graph )[0]
stack.append(_lowercase )
visited.append(_lowercase )
__UpperCAmelCase = -2
__UpperCAmelCase = []
__UpperCAmelCase = s
__UpperCAmelCase = False
__UpperCAmelCase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__UpperCAmelCase = len(_lowercase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__UpperCAmelCase = True
if len(_lowercase ) != 0:
__UpperCAmelCase = stack[len(_lowercase ) - 1]
else:
__UpperCAmelCase = False
indirect_parents.append(_lowercase )
__UpperCAmelCase = s
__UpperCAmelCase = ss
            # check if we have reached the starting point
if len(_lowercase ) == 0:
return list(_lowercase )
def a ( self : str ):
__UpperCAmelCase = []
__UpperCAmelCase = []
__UpperCAmelCase = list(self.graph )[0]
stack.append(_lowercase )
visited.append(_lowercase )
__UpperCAmelCase = -2
__UpperCAmelCase = []
__UpperCAmelCase = s
__UpperCAmelCase = False
__UpperCAmelCase = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
__UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
__UpperCAmelCase = len(_lowercase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
__UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__UpperCAmelCase = True
if len(_lowercase ) != 0:
__UpperCAmelCase = stack[len(_lowercase ) - 1]
else:
__UpperCAmelCase = False
indirect_parents.append(_lowercase )
__UpperCAmelCase = s
__UpperCAmelCase = ss
            # check if we have reached the starting point
if len(_lowercase ) == 0:
return False
def a ( self : Optional[int] ):
return list(self.graph )
def a ( self : Union[str, Any] , _lowercase : int=-2 , _lowercase : Dict=-1 ):
__UpperCAmelCase = time()
self.dfs(_lowercase , _lowercase )
__UpperCAmelCase = time()
return end - begin
def a ( self : int , _lowercase : Union[str, Any]=-2 ):
__UpperCAmelCase = time()
self.bfs(_lowercase )
__UpperCAmelCase = time()
return end - begin
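# A minimal, readable sketch of the adjacency-list traversal used by the two
# classes above. Their identifiers were mechanically renamed, so the names
# here (SimpleDigraph, add_edge, dfs, bfs) are illustrative, not the
# originals; the edge representation graph[u] = [[w, v], ...] is the same.
class SimpleDigraph:
    def __init__(self):
        self.graph = {}

    def add_edge(self, u, v, w=1):
        # register both endpoints, then store the weighted edge once
        self.graph.setdefault(u, [])
        self.graph.setdefault(v, [])
        if [w, v] not in self.graph[u]:
            self.graph[u].append([w, v])

    def dfs(self, start):
        visited, stack = [], [start]
        while stack:
            node = stack.pop()
            if node not in visited:
                visited.append(node)
                stack.extend(v for _, v in self.graph[node])  # weight ignored
        return visited

    def bfs(self, start):
        visited, queue = [start], deque([start])  # deque imported above
        while queue:
            node = queue.popleft()
            for _, v in self.graph[node]:
                if v not in visited:
                    visited.append(v)
                    queue.append(v)
        return visited

_g = SimpleDigraph()
for _u, _v in [(0, 1), (0, 2), (1, 3), (2, 3)]:
    _g.add_edge(_u, _v)
assert _g.bfs(0) == [0, 1, 2, 3]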
| 49 |
"""simple docstring"""
def lowercase__ ( snake_case_ :dict ):
__UpperCAmelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__UpperCAmelCase = set()
return any(
node not in visited and depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
for node in graph )
def lowercase__ ( snake_case_ :dict , snake_case_ :int , snake_case_ :set , snake_case_ :set ):
visited.add(snake_case_ )
rec_stk.add(snake_case_ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(snake_case_ )
return False
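# A hedged restatement of the same back-edge test with readable names (the
# helper name `has_cycle` is illustrative, not from the original):
def has_cycle(graph: dict) -> bool:
    visited, rec_stk = set(), set()

    def visit(vertex) -> bool:
        visited.add(vertex)
        rec_stk.add(vertex)
        for node in graph[vertex]:
            if node not in visited:
                if visit(node):
                    return True
            elif node in rec_stk:  # back edge => cycle
                return True
        rec_stk.remove(vertex)  # done exploring this branch
        return False

    return any(node not in visited and visit(node) for node in graph)

assert has_cycle({0: [1], 1: [2], 2: [0]}) is True
assert has_cycle({0: [1], 1: [2], 2: []}) is False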
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49 | 1 |
"""simple docstring"""
from __future__ import annotations
class _UpperCAmelCase :
def __init__( self : List[str] , _lowercase : int ):
__UpperCAmelCase = order
# a_{0} ... a_{k}
__UpperCAmelCase = [1.0] + [0.0] * order
# b_{0} ... b_{k}
__UpperCAmelCase = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
__UpperCAmelCase = [0.0] * self.order
# y[n-1] ... y[n-k]
__UpperCAmelCase = [0.0] * self.order
def a ( self : Optional[int] , _lowercase : list[float] , _lowercase : list[float] ):
if len(_lowercase ) < self.order:
__UpperCAmelCase = [1.0, *a_coeffs]
if len(_lowercase ) != self.order + 1:
__UpperCAmelCase = (
F'''Expected a_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(_lowercase )}'''
)
raise ValueError(_lowercase )
if len(_lowercase ) != self.order + 1:
__UpperCAmelCase = (
F'''Expected b_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(_lowercase )}'''
)
raise ValueError(_lowercase )
__UpperCAmelCase = a_coeffs
__UpperCAmelCase = b_coeffs
def a ( self : str , _lowercase : float ):
__UpperCAmelCase = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
__UpperCAmelCase = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
__UpperCAmelCase = self.input_history[:-1]
__UpperCAmelCase = self.output_history[:-1]
__UpperCAmelCase = sample
__UpperCAmelCase = result
return result
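# A standalone sketch of the same direct-form update with an illustrative
# first-order smoother, y[n] = 0.5 * x[n] + 0.5 * y[n-1] (i.e. a_coeffs =
# [1.0, -0.5], b_coeffs = [0.5, 0.0]); the coefficients are demo assumptions.
def iir_step(sample, x_hist, y_hist, a_coeffs, b_coeffs):
    acc = b_coeffs[0] * sample
    for i in range(1, len(a_coeffs)):
        acc += b_coeffs[i] * x_hist[i - 1] - a_coeffs[i] * y_hist[i - 1]
    result = acc / a_coeffs[0]
    x_hist[:] = [sample] + x_hist[:-1]  # shift histories, newest first
    y_hist[:] = [result] + y_hist[:-1]
    return result

_x_hist, _y_hist = [0.0], [0.0]
_out = [iir_step(s, _x_hist, _y_hist, [1.0, -0.5], [0.5, 0.0]) for s in [1.0] * 5]
assert abs(_out[-1] - 1.0) < 0.1  # a unit step settles toward 1.0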
| 49 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ['PoolFormerFeatureExtractor']
_lowercase : Any = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
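# The _LazyModule idea above in miniature: attribute access triggers the real
# import, so nothing heavy loads at import time (stdlib-only, illustrative).
import importlib
import types

class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # only invoked for attributes not already set on the module object
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)

sys.modules["lazy_math"] = _DemoLazyModule("lazy_math", {"math": ["sqrt"]})
import lazy_math

assert lazy_math.sqrt(9) == 3.0  # "math" is resolved on this first access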
| 49 | 1 |
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class _UpperCAmelCase ( _lowerCAmelCase ):
    # to overwrite in feature-extractor-specific tests
a__ : Union[str, Any] = None
a__ : Optional[int] = None
@property
def a ( self : Dict ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def a ( self : Union[str, Any] ):
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_lowercase , '''feature_size''' ) )
self.assertTrue(hasattr(_lowercase , '''sampling_rate''' ) )
self.assertTrue(hasattr(_lowercase , '''padding_value''' ) )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowercase ) == len(_lowercase ) for x, y in zip(_lowercase , processed_features[input_name] ) ) )
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowercase )
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
__UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def a ( self : Optional[Any] ):
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowercase )
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
__UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def a ( self : int ):
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowercase )
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' )
__UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def a ( self : Tuple , _lowercase : Dict=False ):
def _inputs_have_equal_length(_lowercase : List[str] ):
__UpperCAmelCase = len(input[0] )
for input_slice in input[1:]:
if len(_lowercase ) != length:
return False
return True
def _inputs_are_equal(_lowercase : Any , _lowercase : int ):
if len(_lowercase ) != len(_lowercase ):
return False
for input_slice_a, input_slice_a in zip(_lowercase , _lowercase ):
if not np.allclose(np.asarray(_lowercase ) , np.asarray(_lowercase ) , atol=1E-3 ):
return False
return True
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowercase )
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
__UpperCAmelCase = self.feat_extract_tester.seq_length_diff
__UpperCAmelCase = self.feat_extract_tester.max_seq_length + pad_diff
__UpperCAmelCase = self.feat_extract_tester.min_seq_length
__UpperCAmelCase = self.feat_extract_tester.batch_size
__UpperCAmelCase = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__UpperCAmelCase = feat_extract.pad(_lowercase , padding=_lowercase )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''longest''' )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''max_length''' , max_length=len(speech_inputs[-1] ) )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''longest''' , return_tensors='''np''' )
__UpperCAmelCase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_lowercase ):
feat_extract.pad(_lowercase , padding='''max_length''' )[input_name]
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=_lowercase , return_tensors='''np''' )
__UpperCAmelCase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_are_equal(_lowercase , _lowercase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
__UpperCAmelCase = feat_extract.pad(_lowercase , pad_to_multiple_of=10 )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''longest''' , pad_to_multiple_of=10 )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_lowercase )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_lowercase , return_tensors='''np''' , )
__UpperCAmelCase = input_a[input_name]
self.assertTrue(all(len(_lowercase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_lowercase , _lowercase ) )
__UpperCAmelCase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_lowercase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
__UpperCAmelCase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def a ( self : Optional[int] , _lowercase : List[str]=False ):
def _inputs_have_equal_length(_lowercase : Dict ):
__UpperCAmelCase = len(input[0] )
for input_slice in input[1:]:
if len(_lowercase ) != length:
return False
return True
def _inputs_are_equal(_lowercase : Any , _lowercase : Any ):
if len(_lowercase ) != len(_lowercase ):
return False
for input_slice_a, input_slice_a in zip(_lowercase , _lowercase ):
if not np.allclose(np.asarray(_lowercase ) , np.asarray(_lowercase ) , atol=1E-3 ):
return False
return True
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowercase )
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=_lowercase )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''max_length''' , max_length=len(speech_inputs[0] ) )
__UpperCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertFalse(_inputs_have_equal_length(_lowercase ) )
# truncate to smallest with np
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=_lowercase , )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' )
__UpperCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
        # since truncation forces padding to be smaller than the longest input,
        # the function can't return an `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(_lowercase ) )
# truncate to middle
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=_lowercase , return_tensors='''np''' , )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=_lowercase )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' )
__UpperCAmelCase = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_are_equal(_lowercase , _lowercase ) )
        # since truncation forces padding to be smaller than the longest input,
        # the function can't return an `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowercase ):
feat_extract.pad(_lowercase , truncation=_lowercase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowercase ):
feat_extract.pad(_lowercase , padding='''longest''' , truncation=_lowercase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowercase ):
feat_extract.pad(_lowercase , padding='''longest''' , truncation=_lowercase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_lowercase ):
feat_extract.pad(_lowercase , padding='''max_length''' , truncation=_lowercase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__UpperCAmelCase = 12
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowercase , truncation=_lowercase , )
__UpperCAmelCase = input_a[input_name]
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowercase , )
__UpperCAmelCase = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__UpperCAmelCase = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
__UpperCAmelCase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertFalse(_inputs_have_equal_length(_lowercase ) )
def a ( self : str ):
self._check_padding(numpify=_lowercase )
def a ( self : Optional[int] ):
self._check_padding(numpify=_lowercase )
def a ( self : List[Any] ):
self._check_truncation(numpify=_lowercase )
def a ( self : Dict ):
self._check_truncation(numpify=_lowercase )
@require_torch
def a ( self : Tuple ):
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''longest''' , return_tensors='''np''' )[input_name]
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def a ( self : str ):
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''longest''' , return_tensors='''np''' )[input_name]
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''longest''' , return_tensors='''tf''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def a ( self : Optional[Any] ):
__UpperCAmelCase = self.feat_extract_dict
__UpperCAmelCase = True
__UpperCAmelCase = self.feature_extraction_class(**_lowercase )
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
__UpperCAmelCase = [len(_lowercase ) for x in speech_inputs]
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
__UpperCAmelCase = feat_extract.pad(_lowercase , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _lowercase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowercase )
def a ( self : Any ):
__UpperCAmelCase = self.feat_extract_dict
__UpperCAmelCase = True
__UpperCAmelCase = self.feature_extraction_class(**_lowercase )
__UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
__UpperCAmelCase = [len(_lowercase ) for x in speech_inputs]
__UpperCAmelCase = feat_extract.model_input_names[0]
__UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
__UpperCAmelCase = min(_lowercase )
__UpperCAmelCase = feat_extract.pad(
_lowercase , padding='''max_length''' , max_length=_lowercase , truncation=_lowercase , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _lowercase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 49 |
"""simple docstring"""
def lowercase__ ( snake_case_ :Dict ): # noqa: E741
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = 0
__UpperCAmelCase = [0] * n
__UpperCAmelCase = [False] * n
__UpperCAmelCase = [False] * n
def dfs(snake_case_ :Tuple , snake_case_ :Union[str, Any] , snake_case_ :Any , snake_case_ :int ):
if parent == root:
out_edge_count += 1
__UpperCAmelCase = True
__UpperCAmelCase = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
__UpperCAmelCase = dfs(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
__UpperCAmelCase = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
__UpperCAmelCase = True
# AP found via cycle
if at == low[to]:
__UpperCAmelCase = True
else:
__UpperCAmelCase = min(low[at] , snake_case_ )
return out_edge_count
for i in range(snake_case_ ):
if not visited[i]:
__UpperCAmelCase = 0
__UpperCAmelCase = dfs(snake_case_ , snake_case_ , -1 , snake_case_ )
__UpperCAmelCase = out_edge_count > 1
for x in range(len(snake_case_ ) ):
if is_art[x] is True:
print(snake_case_ )
# Adjacency list of graph
_lowercase : Optional[Any] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
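# A hedged cross-check for the routine above: brute-force the articulation
# points by deleting each vertex and re-testing connectivity. For the
# adjacency list used above this yields {2, 3, 5}.
def brute_force_aps(graph):
    def connected(g):
        if not g:
            return True
        seen, stack = set(), [next(iter(g))]
        while stack:
            u = stack.pop()
            if u not in seen:
                seen.add(u)
                stack.extend(v for v in g[u] if v not in seen)
        return len(seen) == len(g)

    aps = set()
    for removed in graph:
        reduced = {u: [v for v in nbrs if v != removed]
                   for u, nbrs in graph.items() if u != removed}
        if not connected(reduced):
            aps.add(removed)
    return aps

_graph = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3],
          5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}
assert brute_force_aps(_graph) == {2, 3, 5}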
| 49 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : int = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = ['MaskFormerFeatureExtractor']
_lowercase : int = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
_lowercase : List[str] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 49 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict = "EncodecFeatureExtractor"
a__ : Tuple = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : str ):
super().__init__(_lowercase , _lowercase )
__UpperCAmelCase = self.feature_extractor
__UpperCAmelCase = False
def a ( self : List[str] , _lowercase : List[Any]=None , _lowercase : List[str]=None , _lowercase : Any=True ):
return self.tokenizer.get_decoder_prompt_ids(task=_lowercase , language=_lowercase , no_timestamps=_lowercase )
def __call__( self : Any , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowercase , **_lowercase )
__UpperCAmelCase = kwargs.pop('''audio''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''sampling_rate''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''text''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
__UpperCAmelCase = self.tokenizer(_lowercase , **_lowercase )
if audio is not None:
__UpperCAmelCase = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__UpperCAmelCase = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
__UpperCAmelCase = audio_inputs['''padding_mask''']
return inputs
def a ( self : str , *_lowercase : Dict , **_lowercase : List[str] ):
__UpperCAmelCase = kwargs.pop('''audio''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''padding_mask''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if audio_values is not None:
return self._decode_audio(_lowercase , padding_mask=_lowercase )
else:
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def a ( self : Union[str, Any] , *_lowercase : int , **_lowercase : List[str] ):
return self.tokenizer.decode(*_lowercase , **_lowercase )
    def a ( self : List[str] , _lowercase : List[Any] , _lowercase : Optional[np.ndarray] = None ):
__UpperCAmelCase = to_numpy(_lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = audio_values.shape
if padding_mask is None:
return list(_lowercase )
__UpperCAmelCase = to_numpy(_lowercase )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__UpperCAmelCase = seq_len - padding_mask.shape[-1]
__UpperCAmelCase = 1 - self.feature_extractor.padding_value
__UpperCAmelCase = np.pad(_lowercase , ((0, 0), (0, difference)) , '''constant''' , constant_values=_lowercase )
__UpperCAmelCase = audio_values.tolist()
for i in range(_lowercase ):
__UpperCAmelCase = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__UpperCAmelCase = sliced_audio.reshape(_lowercase , -1 )
return audio_values
| 49 | 1 |
"""simple docstring"""
import numpy as np
from PIL import Image
def lowercase__ ( snake_case_ :np.ndarray , snake_case_ :int , snake_case_ :int ):
__UpperCAmelCase = np.array(snake_case_ )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
__UpperCAmelCase = 0
__UpperCAmelCase = 0
__UpperCAmelCase = 0
__UpperCAmelCase = 0
# compute the shape of the output matrix
__UpperCAmelCase = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__UpperCAmelCase = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__UpperCAmelCase = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__UpperCAmelCase = 0
__UpperCAmelCase = 0
return updated_arr
def lowercase__ ( snake_case_ :np.ndarray , snake_case_ :int , snake_case_ :int ):
__UpperCAmelCase = np.array(snake_case_ )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
__UpperCAmelCase = 0
__UpperCAmelCase = 0
__UpperCAmelCase = 0
__UpperCAmelCase = 0
# compute the shape of the output matrix
__UpperCAmelCase = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__UpperCAmelCase = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__UpperCAmelCase = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__UpperCAmelCase = 0
__UpperCAmelCase = 0
return updated_arr
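# A hand-checkable example of the two routines on a 4x4 matrix; the names
# maxpooling / avgpooling are the ones the __main__ block below calls:
_sample = [[1, 2, 3, 4],
           [5, 6, 7, 8],
           [9, 10, 11, 12],
           [13, 14, 15, 16]]
# 2x2 windows with stride 2: max keeps the largest value per window,
# avg keeps the (integer-truncated) mean per window
assert maxpooling(_sample, 2, 2).tolist() == [[6, 8], [14, 16]]
assert avgpooling(_sample, 2, 2).tolist() == [[3, 5], [11, 13]]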
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
_lowercase : List[str] = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 49 |
"""simple docstring"""
def lowercase__ ( snake_case_ :str , snake_case_ :str ):
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
__UpperCAmelCase = True
for i in range(snake_case_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
__UpperCAmelCase = True
if a[i].islower():
__UpperCAmelCase = True
return dp[n][m]
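# A readable restatement with a worked example. The problem behind this DP
# (not spelled out above): can string `a` be turned into string `b` by
# upper-casing some of its letters and deleting the remaining lower-case
# ones? The name `abbr` is illustrative.
def abbr(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True  # promote a[i] to match b[j]
                if a[i].islower():
                    dp[i + 1][j] = True  # drop the lower-case a[i]
    return dp[n][m]

assert abbr("daBcd", "ABC") is True   # daBcd -> dABCd -> ABC
assert abbr("dBcd", "ABC") is False   # no 'a' to promote into 'A'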
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 | 1 |
"""simple docstring"""
import os
from math import logaa
def lowercase__ ( snake_case_ :str = "base_exp.txt" ):
__UpperCAmelCase = 0
__UpperCAmelCase = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(snake_case_ ) , snake_case_ ) ) ):
__UpperCAmelCase , __UpperCAmelCase = list(map(snake_case_ , line.split(''',''' ) ) )
if x * logaa(snake_case_ ) > largest:
__UpperCAmelCase = x * logaa(snake_case_ )
__UpperCAmelCase = i + 1
return result
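# The comparison trick used above, in miniature: b**e is largest exactly when
# e * log10(b) is largest, so the enormous powers never have to be computed.
from math import log10

_pairs = [(2, 11), (3, 7), (5, 5)]  # 2**11 = 2048, 3**7 = 2187, 5**5 = 3125
_best = max(range(len(_pairs)), key=lambda i: _pairs[i][1] * log10(_pairs[i][0]))
assert _best == 2  # 5**5 = 3125 wins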
if __name__ == "__main__":
print(solution())
| 49 |
"""simple docstring"""
from collections import deque
class _UpperCAmelCase :
def __init__( self : List[Any] , _lowercase : str , _lowercase : int , _lowercase : int ):
__UpperCAmelCase = process_name # process name
__UpperCAmelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__UpperCAmelCase = arrival_time
__UpperCAmelCase = burst_time # remaining burst time
__UpperCAmelCase = 0 # total time of the process wait in ready queue
__UpperCAmelCase = 0 # time from arrival time to completion time
class _UpperCAmelCase :
def __init__( self : List[str] , _lowercase : int , _lowercase : list[int] , _lowercase : deque[Process] , _lowercase : int , ):
# total number of mlfq's queues
__UpperCAmelCase = number_of_queues
# time slice of queues that round robin algorithm applied
__UpperCAmelCase = time_slices
# unfinished process is in this ready_queue
__UpperCAmelCase = queue
# current time
__UpperCAmelCase = current_time
# finished process is in this sequence queue
__UpperCAmelCase = deque()
def a ( self : Dict ):
__UpperCAmelCase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def a ( self : str , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def a ( self : Any , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def a ( self : Tuple , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def a ( self : Optional[int] , _lowercase : deque[Process] ):
return [q.burst_time for q in queue]
def a ( self : str , _lowercase : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def a ( self : Union[str, Any] , _lowercase : deque[Process] ):
__UpperCAmelCase = deque() # sequence deque of finished process
while len(_lowercase ) != 0:
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_lowercase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__UpperCAmelCase = 0
# set the process's turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# set the completion time
__UpperCAmelCase = self.current_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def a ( self : Union[str, Any] , _lowercase : deque[Process] , _lowercase : int ):
__UpperCAmelCase = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_lowercase ) ):
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_lowercase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__UpperCAmelCase = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_lowercase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__UpperCAmelCase = 0
# set the finish time
__UpperCAmelCase = self.current_time
# update the process' turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def a ( self : Union[str, Any] ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
__UpperCAmelCase , __UpperCAmelCase = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
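# Round robin in miniature, mirroring the queue rotation implemented above:
# each process runs for at most `time_slice` units, and unfinished work goes
# to the back of the queue (function name and return shape are illustrative).
def round_robin_order(burst_times, time_slice):
    q = deque((i, b) for i, b in enumerate(burst_times))
    schedule = []  # (process index, units executed) per dispatch
    while q:
        i, remaining = q.popleft()
        run = min(time_slice, remaining)
        schedule.append((i, run))
        if remaining > run:
            q.append((i, remaining - run))
    return schedule

assert round_robin_order([5, 3], 2) == [(0, 2), (1, 2), (0, 2), (1, 1), (0, 1)]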
if __name__ == "__main__":
import doctest
_lowercase : List[str] = Process('P1', 0, 53)
_lowercase : str = Process('P2', 0, 17)
_lowercase : Union[str, Any] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : Any = 3
_lowercase : Union[str, Any] = [17, 25]
_lowercase : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
_lowercase : Optional[Any] = Process('P1', 0, 53)
_lowercase : Tuple = Process('P2', 0, 17)
_lowercase : Optional[int] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : int = 3
_lowercase : int = [17, 25]
_lowercase : List[str] = deque([Pa, Pa, Pa, Pa])
_lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_lowercase : str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 49 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[Any] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Tuple = "camembert"
def __init__( self : Union[str, Any] , _lowercase : Any=3_05_22 , _lowercase : Any=7_68 , _lowercase : Union[str, Any]=12 , _lowercase : List[str]=12 , _lowercase : int=30_72 , _lowercase : Union[str, Any]="gelu" , _lowercase : Dict=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : int=5_12 , _lowercase : Optional[Any]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[Any]=1E-12 , _lowercase : Optional[int]=1 , _lowercase : Optional[Any]=0 , _lowercase : Tuple=2 , _lowercase : List[Any]="absolute" , _lowercase : List[Any]=True , _lowercase : Dict=None , **_lowercase : Optional[int] , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
__UpperCAmelCase = classifier_dropout
class _UpperCAmelCase ( _lowerCAmelCase ):
@property
def a ( self : Tuple ):
if self.task == "multiple-choice":
__UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
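# A hedged usage sketch: the class above is transformers' CamembertConfig,
# and the asserts below only restate the defaults visible in its signature
# (running this requires the transformers package).
from transformers import CamembertConfig

_cfg = CamembertConfig()
assert _cfg.hidden_size == 768 and _cfg.num_hidden_layers == 12
assert _cfg.vocab_size == 30522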
| 49 | 1 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lowercase : Optional[int] = threading.Lock()
_lowercase : Optional[logging.Handler] = None
_lowercase : List[str] = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
_lowercase : Optional[int] = logging.WARNING
_lowercase : Optional[int] = True
def lowercase__ ( ):
__UpperCAmelCase = os.getenv('''TRANSFORMERS_VERBOSITY''' , snake_case_ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
F'''has to be one of: { ", ".join(log_levels.keys() ) }''' )
return _default_log_level
def lowercase__ ( ):
return __name__.split('''.''' )[0]
def lowercase__ ( ):
return logging.getLogger(_get_library_name() )
def lowercase__ ( ):
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
__UpperCAmelCase = logging.StreamHandler() # Set sys.stderr as stream.
__UpperCAmelCase = sys.stderr.flush
# Apply our default configuration to the library root logger.
__UpperCAmelCase = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
__UpperCAmelCase = False
def lowercase__ ( ):
global _default_handler
with _lock:
if not _default_handler:
return
__UpperCAmelCase = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
__UpperCAmelCase = None
def lowercase__ ( ):
return log_levels
def lowercase__ ( snake_case_ :Optional[str] = None ):
if name is None:
__UpperCAmelCase = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(snake_case_ )
def lowercase__ ( ):
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def lowercase__ ( snake_case_ :int ):
_configure_library_root_logger()
_get_library_root_logger().setLevel(snake_case_ )
def lowercase__ ( ):
return set_verbosity(snake_case_ )
def lowercase__ ( ):
return set_verbosity(snake_case_ )
def lowercase__ ( ):
return set_verbosity(snake_case_ )
def lowercase__ ( ):
return set_verbosity(snake_case_ )
def lowercase__ ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def lowercase__ ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def lowercase__ ( snake_case_ :logging.Handler ):
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(snake_case_ )
def lowercase__ ( snake_case_ :logging.Handler ):
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(snake_case_ )
def lowercase__ ( ):
_configure_library_root_logger()
__UpperCAmelCase = False
def lowercase__ ( ):
_configure_library_root_logger()
__UpperCAmelCase = True
def lowercase__ ( ):
__UpperCAmelCase = _get_library_root_logger().handlers
for handler in handlers:
__UpperCAmelCase = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' )
handler.setFormatter(snake_case_ )
def lowercase__ ( ):
__UpperCAmelCase = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(snake_case_ )
def lowercase__ ( self :List[str] , *snake_case_ :Dict , **snake_case_ :int ):
__UpperCAmelCase = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , snake_case_ )
if no_advisory_warnings:
return
self.warning(*snake_case_ , **snake_case_ )
_lowercase : str = warning_advice
@functools.lru_cache(snake_case_ )
def lowercase__ ( self :Union[str, Any] , *snake_case_ :Union[str, Any] , **snake_case_ :List[str] ):
self.warning(*snake_case_ , **snake_case_ )
_lowercase : str = warning_once
class _UpperCAmelCase :
def __init__( self : str , *_lowercase : int , **_lowercase : str ): # pylint: disable=unused-argument
__UpperCAmelCase = args[0] if args else None
def __iter__( self : Tuple ):
return iter(self._iterator )
def __getattr__( self : Any , _lowercase : Optional[int] ):
def empty_fn(*_lowercase : List[str] , **_lowercase : Union[str, Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : List[Any] ):
return self
def __exit__( self : List[Any] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Dict ):
return
class _UpperCAmelCase :
def __call__( self : int , *_lowercase : Optional[int] , **_lowercase : List[Any] ):
if _tqdm_active:
return tqdm_lib.tqdm(*_lowercase , **_lowercase )
else:
return EmptyTqdm(*_lowercase , **_lowercase )
def a ( self : Optional[int] , *_lowercase : Optional[int] , **_lowercase : Any ):
__UpperCAmelCase = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_lowercase , **_lowercase )
def a ( self : Union[str, Any] ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
_lowercase : Optional[Any] = _tqdm_cls()
def lowercase__ ( ):
global _tqdm_active
return bool(_tqdm_active )
def lowercase__ ( ):
global _tqdm_active
__UpperCAmelCase = True
hf_hub_utils.enable_progress_bars()
def lowercase__ ( ):
global _tqdm_active
__UpperCAmelCase = False
hf_hub_utils.disable_progress_bars()
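# The core pattern above in miniature: one root logger per library, a single
# stderr handler attached exactly once, and get_logger() handing out children
# of that root ("mylib" is an illustrative stand-in for the library name;
# `logging` and `sys` are the module-level imports above).
_DEMO_LIB = "mylib"

def _demo_get_logger(name=None):
    root = logging.getLogger(_DEMO_LIB)
    if not root.handlers:  # configure the library root only once
        root.addHandler(logging.StreamHandler(sys.stderr))
        root.setLevel(logging.WARNING)  # default verbosity
    return logging.getLogger(name or _DEMO_LIB)

_demo_log = _demo_get_logger(f"{_DEMO_LIB}.submodule")
_demo_log.warning("shown: WARNING is the default level")
_demo_log.info("hidden unless the root level is lowered to INFO")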
| 49 |
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( snake_case_ :list , snake_case_ :int ):
# Checks if the entire collection has been sorted
if len(snake_case_ ) <= 1 or n <= 1:
return
insert_next(snake_case_ , n - 1 )
rec_insertion_sort(snake_case_ , n - 1 )
def lowercase__ ( snake_case_ :list , snake_case_ :int ):
# Checks order between adjacent elements
if index >= len(snake_case_ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
__UpperCAmelCase , __UpperCAmelCase = (
collection[index],
collection[index - 1],
)
insert_next(snake_case_ , index + 1 )
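# Worked trace on [3, 1, 2], using the names rec_insertion_sort / insert_next
# that the __main__ block below also assumes:
#   rec_insertion_sort(c, 3): insert_next(c, 2) leaves [3, 1, 2] since 1 <= 2;
#   rec_insertion_sort(c, 2): insert_next(c, 1) swaps 3,1 -> [1, 3, 2], then
#   its recursive insert_next(c, 2) swaps 3,2 -> [1, 2, 3].
_nums = [3, 1, 2]
rec_insertion_sort(_nums, len(_nums))
assert _nums == [1, 2, 3]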
if __name__ == "__main__":
_lowercase : Any = input('Enter integers separated by spaces: ')
_lowercase : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 49 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : List[Any] = logging.get_logger(__name__)
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def lowercase__ ( snake_case_ :int , snake_case_ :Any ):
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__UpperCAmelCase = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
__UpperCAmelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
__UpperCAmelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__UpperCAmelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def lowercase__ ( snake_case_ :Dict , snake_case_ :Tuple , snake_case_ :Tuple ):
__UpperCAmelCase = dct.pop(snake_case_ )
__UpperCAmelCase = val
def lowercase__ ( snake_case_ :Optional[Any] ):
if "handwritten" in checkpoint_url:
__UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
__UpperCAmelCase = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def lowercase__ ( snake_case_ :Tuple , snake_case_ :str ):
__UpperCAmelCase = ViTConfig(image_size=384 , qkv_bias=snake_case_ )
__UpperCAmelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__UpperCAmelCase = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
__UpperCAmelCase = 1_024
__UpperCAmelCase = 4_096
__UpperCAmelCase = 24
__UpperCAmelCase = 16
__UpperCAmelCase = 1_024
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, with no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = False
__UpperCAmelCase = '''relu'''
__UpperCAmelCase = 1_024
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
# load HuggingFace model
__UpperCAmelCase = ViTModel(snake_case_ , add_pooling_layer=snake_case_ )
__UpperCAmelCase = TrOCRForCausalLM(snake_case_ )
__UpperCAmelCase = VisionEncoderDecoderModel(encoder=snake_case_ , decoder=snake_case_ )
model.eval()
# load state_dict of original model, rename some keys
__UpperCAmelCase = torch.hub.load_state_dict_from_url(snake_case_ , map_location='''cpu''' , check_hash=snake_case_ )['''model''']
__UpperCAmelCase = create_rename_keys(snake_case_ , snake_case_ )
for src, dest in rename_keys:
rename_key(snake_case_ , snake_case_ , snake_case_ )
read_in_q_k_v(snake_case_ , snake_case_ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__UpperCAmelCase = state_dict.pop(snake_case_ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
__UpperCAmelCase = val
else:
__UpperCAmelCase = val
# load state dict
model.load_state_dict(snake_case_ )
# Check outputs on an image
__UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size )
__UpperCAmelCase = RobertaTokenizer.from_pretrained('''roberta-large''' )
__UpperCAmelCase = TrOCRProcessor(snake_case_ , snake_case_ )
__UpperCAmelCase = processor(images=prepare_img(snake_case_ ) , return_tensors='''pt''' ).pixel_values
# verify logits
__UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__UpperCAmelCase = model(pixel_values=snake_case_ , decoder_input_ids=snake_case_ )
__UpperCAmelCase = outputs.logits
__UpperCAmelCase = torch.Size([1, 1, 50_265] )
if "trocr-base-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , snake_case_ , atol=1E-3 ), "First elements of logits not as expected"
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_lowercase : Any = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 49 |
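The read_in_q_k_v helper in the conversion script above slices DeiT's fused qkv projection into the separate query/key/value weights the HuggingFace encoder expects. A minimal standalone sketch of that slicing, with an illustrative hidden size (real ViT-base uses 768):

import torch

hidden_size = 8  # illustrative only; not read from a real checkpoint
# The fused projection stacks q, k, v row-wise: shape (3 * hidden, hidden)
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)

query_weight = in_proj_weight[:hidden_size, :]
key_weight = in_proj_weight[hidden_size : hidden_size * 2, :]
value_weight = in_proj_weight[-hidden_size:, :]

assert query_weight.shape == key_weight.shape == value_weight.shape == (hidden_size, hidden_size)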
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Any = StableUnCLIPPipeline
a__ : Dict = TEXT_TO_IMAGE_PARAMS
a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ : Optional[int] = False
def a ( self : List[str] ):
__UpperCAmelCase = 32
__UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ):
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Any ):
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def a ( self : int ):
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ):
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
    __UpperCAmelCase = pipe('''anime turtle''' , generator=_lowercase , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
def a ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 49 | 1 |
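The slow tests above lean on two standard diffusers memory-saving switches. A hedged usage sketch (the checkpoint id and fp16 dtype are copied from the test; everything else is illustrative):

import torch
from diffusers import StableUnCLIPPipeline

pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
# Compute attention in slices and stream submodules between CPU and GPU
# instead of keeping all weights resident, trading speed for peak memory.
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
image = pipe("anime turtle", output_type="np").images[0]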
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_lowercase : int = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : List[str] , _lowercase : str , _lowercase : bool , _lowercase : str = None , _lowercase : list = None ):
__UpperCAmelCase = None
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
__UpperCAmelCase = os.path.abspath('''examples''' )
for item in os.listdir(_lowercase ):
if item not in EXCLUDE_EXAMPLES:
__UpperCAmelCase = os.path.join(_lowercase , _lowercase )
if os.path.isfile(_lowercase ) and ".py" in item_path:
with self.subTest(
tested_script=_lowercase , feature_script=_lowercase , tested_section='''main()''' if parser_only else '''training_function()''' , ):
__UpperCAmelCase = compare_against_test(
os.path.join(_lowercase , _lowercase ) , _lowercase , _lowercase , _lowercase )
__UpperCAmelCase = '''\n'''.join(_lowercase )
if special_strings is not None:
for string in special_strings:
__UpperCAmelCase = diff.replace(_lowercase , '''''' )
self.assertEqual(_lowercase , '''''' )
def a ( self : List[str] ):
self.one_complete_example('''complete_nlp_example.py''' , _lowercase )
self.one_complete_example('''complete_nlp_example.py''' , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
__UpperCAmelCase = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , _lowercase , _lowercase , _lowercase )
self.one_complete_example('''complete_cv_example.py''' , _lowercase , _lowercase , _lowercase )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : List[str] = False
@classmethod
def a ( cls : int ):
super().setUpClass()
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def a ( cls : List[Any] ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def a ( self : List[str] ):
__UpperCAmelCase = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def a ( self : Optional[int] ):
__UpperCAmelCase = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
__UpperCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
'''.split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=_lowercase )
self.assertNotIn('''epoch 0:''' , _lowercase )
self.assertIn('''epoch 1:''' , _lowercase )
def a ( self : Any ):
__UpperCAmelCase = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
'''.split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=_lowercase )
if torch.cuda.is_available():
__UpperCAmelCase = torch.cuda.device_count()
else:
__UpperCAmelCase = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , _lowercase )
self.assertIn('''epoch 1:''' , _lowercase )
else:
self.assertIn('''epoch 0:''' , _lowercase )
self.assertIn('''epoch 1:''' , _lowercase )
@slow
def a ( self : Any ):
__UpperCAmelCase = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=_lowercase )
__UpperCAmelCase = re.findall('''({.+})''' , _lowercase )
__UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1]
__UpperCAmelCase = ast.literal_eval(_lowercase )
self.assertGreaterEqual(results['''accuracy'''] , 0.75 )
def a ( self : List[str] ):
__UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def a ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
__UpperCAmelCase = F'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''tracking''' ) ) )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def a ( self : List[str] ):
__UpperCAmelCase = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 49 |
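Outside the test harness, the same checkpoint/resume flow can be driven through the `accelerate launch` CLI directly; a sketch using only the flags exercised by the tests above (the config file and output paths are placeholders):

import subprocess

launch = ["accelerate", "launch", "--config_file", "default_config.yml"]
# First run: save a checkpoint at the end of every epoch
subprocess.run(launch + ["examples/by_feature/checkpointing.py",
                         "--checkpointing_steps", "epoch",
                         "--output_dir", "out"], check=True)
# Second run: resume training from the saved epoch_0 checkpoint
subprocess.run(launch + ["examples/by_feature/checkpointing.py",
                         "--resume_from_checkpoint", "out/epoch_0"], check=True)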
"""simple docstring"""
from typing import Any
def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :dict , snake_case_ :dict , snake_case_ :dict , ):
_validation(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
    # Creates the data structures and fills in the initial step
__UpperCAmelCase = {}
__UpperCAmelCase = {}
for state in states_space:
__UpperCAmelCase = observations_space[0]
__UpperCAmelCase = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__UpperCAmelCase = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case_ ) ):
__UpperCAmelCase = observations_space[o]
__UpperCAmelCase = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
# Update probabilities and pointers dicts
__UpperCAmelCase = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__UpperCAmelCase = arg_max
# The final observation
__UpperCAmelCase = observations_space[len(snake_case_ ) - 1]
# argmax for given final observation
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = probabilities[(k_state, final_observation)]
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
__UpperCAmelCase = arg_max
# Process pointers backwards
__UpperCAmelCase = last_state
__UpperCAmelCase = []
for o in range(len(snake_case_ ) - 1 , -1 , -1 ):
result.append(snake_case_ )
__UpperCAmelCase = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_not_empty(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
_validate_lists(snake_case_ , snake_case_ )
_validate_dicts(
snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any ):
_validate_list(snake_case_ , '''observations_space''' )
_validate_list(snake_case_ , '''states_space''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list'''
raise ValueError(snake_case_ )
else:
for x in _object:
if not isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list of strings'''
raise ValueError(snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_dict(snake_case_ , '''initial_probabilities''' , snake_case_ )
_validate_nested_dict(snake_case_ , '''transition_probabilities''' )
_validate_nested_dict(snake_case_ , '''emission_probabilities''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
_validate_dict(_object , snake_case_ , snake_case_ )
for x in _object.values():
_validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :str , snake_case_ :type , snake_case_ :bool = False ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a dict'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object ):
__UpperCAmelCase = F'''{var_name} all keys must be strings'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object.values() ):
__UpperCAmelCase = '''nested dictionary ''' if nested else ''''''
__UpperCAmelCase = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(snake_case_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49 | 1 |
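Because the identifiers above are mangled, here is a compact, runnable restatement of the same Viterbi decoding idea with names of my own, checked against the classic healthy/fever HMM example:

def viterbi(obs, states, start_p, trans_p, emit_p):
    # Track, per state, the best path probability so far and the path itself
    prob = {s: start_p[s] * emit_p[s][obs[0]] for s in states}
    path = {s: [s] for s in states}
    for o in obs[1:]:
        new_prob, new_path = {}, {}
        for s in states:
            best_prev = max(states, key=lambda k: prob[k] * trans_p[k][s])
            new_prob[s] = prob[best_prev] * trans_p[best_prev][s] * emit_p[s][o]
            new_path[s] = path[best_prev] + [s]
        prob, path = new_prob, new_path
    return path[max(states, key=lambda s: prob[s])]


states = ["Healthy", "Fever"]
start = {"Healthy": 0.6, "Fever": 0.4}
trans = {"Healthy": {"Healthy": 0.7, "Fever": 0.3}, "Fever": {"Healthy": 0.4, "Fever": 0.6}}
emit = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
assert viterbi(["normal", "cold", "dizzy"], states, start, trans, emit) == ["Healthy", "Healthy", "Fever"]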
"""simple docstring"""
def lowercase__ ( snake_case_ :dict ):
__UpperCAmelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__UpperCAmelCase = set()
return any(
node not in visited and depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
for node in graph )
def lowercase__ ( snake_case_ :dict , snake_case_ :int , snake_case_ :set , snake_case_ :set ):
visited.add(snake_case_ )
rec_stk.add(snake_case_ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(snake_case_ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49 |
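The same cycle-detection algorithm with readable names and a closure instead of threaded parameters (my own restatement; the mangled version above passes the graph, vertex, and both sets explicitly):

def has_cycle(graph: dict) -> bool:
    # DFS keeping a recursion stack; a back edge to a vertex on the stack is a cycle
    visited, on_stack = set(), set()

    def dfs(vertex) -> bool:
        visited.add(vertex)
        on_stack.add(vertex)
        for neighbor in graph[vertex]:
            if neighbor not in visited:
                if dfs(neighbor):
                    return True
            elif neighbor in on_stack:
                return True
        on_stack.remove(vertex)
        return False

    return any(node not in visited and dfs(node) for node in graph)


assert has_cycle({0: [1], 1: [2], 2: [0]}) is True   # 0 -> 1 -> 2 -> 0
assert has_cycle({0: [1], 1: [2], 2: []}) is False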
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_lowercase : int = logging.get_logger(__name__)
_lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowercase : str = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_lowercase : int = {
'yjernite/retribert-base-uncased': 5_12,
}
_lowercase : Any = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : str = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : str = PRETRAINED_INIT_CONFIGURATION
a__ : Optional[Any] = RetriBertTokenizer
a__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars
):
__UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = strip_accents
__UpperCAmelCase = tokenize_chinese_chars
__UpperCAmelCase = normalizer_class(**_lowercase )
__UpperCAmelCase = do_lower_case
def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ):
__UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
__UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 49 | 1 |
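A worked example of the sentence-pair layout the two helpers above produce ([CLS] A [SEP] as segment 0, B [SEP] as segment 1); the ids 101/102 are the usual BERT-style [CLS]/[SEP] ids and are purely illustrative:

cls_id, sep_id = 101, 102
token_ids_a = [7592, 2088]        # e.g. "hello world"
token_ids_b = [2129, 2024, 2017]  # e.g. "how are you"

# build_inputs_with_special_tokens: [CLS] A [SEP] B [SEP]
input_ids = [cls_id] + token_ids_a + [sep_id] + token_ids_b + [sep_id]
# create_token_type_ids_from_sequences: 0s cover [CLS] A [SEP], 1s cover B [SEP]
token_type_ids = [0] * (len(token_ids_a) + 2) + [1] * (len(token_ids_b) + 1)

assert len(input_ids) == len(token_type_ids) == 8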
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : List[Any] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = [
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 |
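The TYPE_CHECKING/_LazyModule arrangement above defers heavy imports until an attribute is first touched. A minimal sketch of the same idea using PEP 562's module-level __getattr__ (the mapping here is illustrative, not the transformers API):

import importlib

_import_structure = {"json": ["dumps"], "math": ["sqrt"]}

def __getattr__(name):
    # Resolve the attribute lazily from whichever module exports it
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")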
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_lowercase : Dict = 'bart'
_lowercase : Dict = True
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
__UpperCAmelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
__UpperCAmelCase = qar_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
if MODEL_TYPE == "bart":
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
__UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
__UpperCAmelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
__UpperCAmelCase = sas_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = faiss.StandardGpuResources()
__UpperCAmelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
__UpperCAmelCase = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
__UpperCAmelCase = faiss.index_cpu_to_gpu(snake_case_ , 1 , snake_case_ )
wikiaab_gpu_index_flat.add(snake_case_ ) # TODO fix for larger GPU
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
__UpperCAmelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
__UpperCAmelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
__UpperCAmelCase = elia['''train_eli5''']
__UpperCAmelCase = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(snake_case_ )
return (elia_train, eli5_train_q_index)
_lowercase ,_lowercase ,_lowercase : Dict = load_indexes()
_lowercase ,_lowercase ,_lowercase ,_lowercase : Dict = load_models()
_lowercase ,_lowercase : Tuple = load_train_data()
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Any=10 ):
__UpperCAmelCase = embed_questions_for_retrieval([question] , snake_case_ , snake_case_ )
__UpperCAmelCase , __UpperCAmelCase = eli5_train_q_index.search(snake_case_ , snake_case_ )
__UpperCAmelCase = [elia_train[int(snake_case_ )] for i in I[0]]
return nn_examples
def lowercase__ ( snake_case_ :Any , snake_case_ :Dict="wiki40b" , snake_case_ :str="dense" , snake_case_ :Union[str, Any]=10 ):
if source == "none":
__UpperCAmelCase , __UpperCAmelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__UpperCAmelCase , __UpperCAmelCase = query_qa_dense_index(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
__UpperCAmelCase , __UpperCAmelCase = query_es_index(
snake_case_ , snake_case_ , index_name='''english_wiki40b_snippets_100w''' , n_results=snake_case_ , )
__UpperCAmelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__UpperCAmelCase = '''question: {} context: {}'''.format(snake_case_ , snake_case_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None),
} )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] , snake_case_ :str , snake_case_ :List[Any]=64 , snake_case_ :Optional[int]=256 , snake_case_ :List[Any]=False , snake_case_ :Optional[Any]=2 , snake_case_ :Optional[Any]=0.95 , snake_case_ :List[Any]=0.8 ):
with torch.no_grad():
__UpperCAmelCase = qa_sas_generate(
snake_case_ , snake_case_ , snake_case_ , num_answers=1 , num_beams=snake_case_ , min_len=snake_case_ , max_len=snake_case_ , do_sample=snake_case_ , temp=snake_case_ , top_p=snake_case_ , top_k=snake_case_ , max_input_length=1_024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
_lowercase : Dict = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
_lowercase : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowercase : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
_lowercase : str = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
_lowercase : Optional[int] = st.sidebar.checkbox('Demo options')
if demo_options:
_lowercase : Tuple = st.sidebar.selectbox(
'',
action_list,
index=3,
)
_lowercase : List[str] = action_list.index(action_st)
_lowercase : str = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
_lowercase : int = show_type == 'Show full text of passages'
else:
_lowercase : str = 3
_lowercase : List[Any] = True
_lowercase : Optional[int] = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    _lowercase : Any = '\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.\n    The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.\n    '
st.sidebar.markdown(retriever_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
_lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
_lowercase : List[str] = 'wiki40b'
_lowercase : Optional[int] = 'dense'
_lowercase : List[Any] = 'beam'
_lowercase : str = 2
_lowercase : Optional[int] = 64
_lowercase : Union[str, Any] = 2_56
_lowercase : List[str] = None
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = st.sidebar.checkbox('Generation options')
if generate_options:
_lowercase : Tuple = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
_lowercase : Optional[int] = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
_lowercase : Optional[Any] = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
_lowercase : str = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowercase : List[Any] = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowercase : Dict = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowercase : Union[str, Any] = None
# start main text
_lowercase : Optional[int] = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
_lowercase : Optional[int] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_lowercase : Optional[Any] = st.text_input('Enter your question here:', '')
else:
_lowercase : int = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowercase ,_lowercase : Any = make_support(question, source=wiki_source, method='dense', n_results=10)
_lowercase ,_lowercase : Union[str, Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
_lowercase : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowercase : Any = support_list[:10]
_lowercase : Tuple = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
_lowercase ,_lowercase : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_lowercase ,_lowercase : Union[str, Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
_lowercase : int = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
_lowercase : Any = res[1].strip()
if sec_titles == "":
_lowercase : Dict = '[{}]({})'.format(res[0], wiki_url)
else:
_lowercase : List[Any] = sec_titles.split(' & ')
_lowercase : int = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
_lowercase : List[Any] = find_nearest_training(question)
_lowercase : Tuple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
_lowercase : int = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
_lowercase : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 49 | 1 |
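Both dense indexes in the demo above reduce to max-inner-product search over fixed-size embeddings. A toy, self-contained version (the 128-d width matches the demo's memmaps; the vectors here are random placeholders):

import numpy as np
import faiss

dim = 128  # same embedding width the demo's memmaps use
corpus = np.random.rand(1000, dim).astype("float32")
index = faiss.IndexFlatIP(dim)  # exact max-inner-product index
index.add(corpus)

query = np.random.rand(1, dim).astype("float32")
scores, ids = index.search(query, 10)  # top-10 passages by inner product
print(ids[0])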
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_lowercase : List[Any] = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
a__ : Union[str, Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a__ : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a__ : Union[str, Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a__ : str = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def a ( self : Optional[Any] , _lowercase : Optional[Any] , _lowercase : str , _lowercase : str ):
__UpperCAmelCase = ZeroShotClassificationPipeline(
            model=_lowercase , tokenizer=_lowercase , candidate_labels=['''politics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def a ( self : int , _lowercase : List[str] , _lowercase : Union[str, Any] ):
__UpperCAmelCase = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(_lowercase , {'''sequence''': ANY(_lowercase ), '''labels''': [ANY(_lowercase )], '''scores''': [ANY(_lowercase )]} )
# No kwarg
__UpperCAmelCase = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(_lowercase , {'''sequence''': ANY(_lowercase ), '''labels''': [ANY(_lowercase )], '''scores''': [ANY(_lowercase )]} )
__UpperCAmelCase = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(_lowercase , {'''sequence''': ANY(_lowercase ), '''labels''': [ANY(_lowercase )], '''scores''': [ANY(_lowercase )]} )
__UpperCAmelCase = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
_lowercase , {'''sequence''': ANY(_lowercase ), '''labels''': [ANY(_lowercase ), ANY(_lowercase )], '''scores''': [ANY(_lowercase ), ANY(_lowercase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__UpperCAmelCase = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
_lowercase , {'''sequence''': ANY(_lowercase ), '''labels''': [ANY(_lowercase ), ANY(_lowercase )], '''scores''': [ANY(_lowercase ), ANY(_lowercase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
__UpperCAmelCase = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(_lowercase , {'''sequence''': ANY(_lowercase ), '''labels''': [ANY(_lowercase )], '''scores''': [ANY(_lowercase )]} )
# https://github.com/huggingface/transformers/issues/13846
__UpperCAmelCase = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''labels''': [ANY(_lowercase ), ANY(_lowercase )], '''scores''': [ANY(_lowercase ), ANY(_lowercase )]}
for i in range(1 )
] , )
__UpperCAmelCase = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''labels''': [ANY(_lowercase ), ANY(_lowercase )], '''scores''': [ANY(_lowercase ), ANY(_lowercase )]}
for i in range(2 )
] , )
with self.assertRaises(_lowercase ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(_lowercase ):
classifier(_lowercase , candidate_labels='''politics''' )
with self.assertRaises(_lowercase ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(_lowercase ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=_lowercase )
with self.assertRaises(_lowercase ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(_lowercase ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=_lowercase , )
self.run_entailment_id(_lowercase )
def a ( self : Any , _lowercase : Pipeline ):
__UpperCAmelCase = zero_shot_classifier.model.config
__UpperCAmelCase = config.labelaid
__UpperCAmelCase = zero_shot_classifier.entailment_id
__UpperCAmelCase = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
__UpperCAmelCase = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__UpperCAmelCase = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__UpperCAmelCase = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
__UpperCAmelCase = original_labelaid
self.assertEqual(_lowercase , zero_shot_classifier.entailment_id )
@require_torch
def a ( self : Dict ):
__UpperCAmelCase = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 1_00 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def a ( self : List[Any] ):
__UpperCAmelCase = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
__UpperCAmelCase = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_lowercase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
def a ( self : Dict ):
__UpperCAmelCase = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
__UpperCAmelCase = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_lowercase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def a ( self : Tuple ):
__UpperCAmelCase = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
__UpperCAmelCase = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_lowercase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
__UpperCAmelCase = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_lowercase , )
self.assertEqual(
nested_simplify(_lowercase ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def a ( self : Dict ):
__UpperCAmelCase = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
__UpperCAmelCase = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_lowercase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
__UpperCAmelCase = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_lowercase , )
self.assertEqual(
nested_simplify(_lowercase ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
| 49 |
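For orientation, the minimal end-user call the tests above exercise; the expected top label comes from the slow roberta-large-mnli test itself:

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
# "labels" is sorted by descending score; the slow test expects "politics" first
print(result["labels"][0], result["scores"][0])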
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[str] = CycleDiffusionPipeline
a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
a__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a ( self : Optional[int] ):
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCAmelCase = CLIPTextModel(_lowercase )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ):
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
__UpperCAmelCase = image / 2 + 0.5
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Optional[int] ):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowercase , '''half''' ):
__UpperCAmelCase = module.half()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a ( self : Tuple ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def a ( self : List[str] ):
return super().test_inference_batch_single_identical()
@skip_mps
def a ( self : int ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self : int ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(
_lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def a ( self : Optional[Any] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
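# Hedged usage sketch mirroring the slow tests above; it is defined but never
# invoked, and running it assumes a CUDA device, network access, and the same
# CompVis checkpoint the tests download. CycleDiffusion inverts the input image
# under the source prompt with DDIM, then re-denoises it under the target
# prompt.
def _cycle_diffusion_usage_sketch():
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/cycle-diffusion/black_colored_car.png"
    ).resize((512, 512))
    scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
    pipe = CycleDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", scheduler=scheduler, safety_checker=None
    ).to("cuda")
    return pipe(
        prompt="A blue colored car",
        source_prompt="A black colored car",
        image=init_image,
        num_inference_steps=100,
        eta=0.1,
        strength=0.85,
        guidance_scale=3,
        source_guidance_scale=1,
        output_type="np",
    ).images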
| 49 | 1 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : Any = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
a__ : Optional[int] = XLMRobertaTokenizer
a__ : List[Any] = XLMRobertaTokenizerFast
a__ : List[str] = True
a__ : Optional[int] = True
def a ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase = XLMRobertaTokenizer(_lowercase , keep_accents=_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self : List[Any] ):
__UpperCAmelCase = '''<pad>'''
__UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(_lowercase ) , 10_02 )
def a ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def a ( self : Dict ):
__UpperCAmelCase = XLMRobertaTokenizer(_lowercase , keep_accents=_lowercase )
__UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(
_lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def a ( self : int ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__UpperCAmelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase )
__UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__UpperCAmelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
__UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase )
__UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=True
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
__UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase )
                # Checks it saves with the same files
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
__UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase )
__UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=False
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
__UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase )
__UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
@cached_property
def a ( self : Dict ):
return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' )
def a ( self : List[Any] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_lowercase , f.name )
__UpperCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=_lowercase )
__UpperCAmelCase = pickle.dumps(_lowercase )
pickle.loads(_lowercase )
def a ( self : List[str] ):
if not self.test_rust_tokenizer:
return
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = self.get_rust_tokenizer()
__UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
__UpperCAmelCase = tokenizer.tokenize(_lowercase )
__UpperCAmelCase = rust_tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
__UpperCAmelCase = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
__UpperCAmelCase = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
__UpperCAmelCase = self.get_rust_tokenizer()
__UpperCAmelCase = tokenizer.encode(_lowercase )
__UpperCAmelCase = rust_tokenizer.encode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
@slow
def a ( self : Optional[int] ):
__UpperCAmelCase = '''Hello World!'''
__UpperCAmelCase = [0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )
@slow
def a ( self : str ):
__UpperCAmelCase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
__UpperCAmelCase = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_lowercase , self.big_tokenizer.encode(_lowercase ) )
@slow
def a ( self : Union[str, Any] ):
# fmt: off
__UpperCAmelCase = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
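# Hedged sketch of the id arithmetic the assertions above rely on (a toy
# re-implementation, not the real XLMRobertaTokenizer internals): fairseq
# reserves ids 0-3 for <s>, <pad>, </s>, <unk>, so raw SentencePiece ids are
# shifted by fairseq_offset = 1, and SentencePiece's own unknown piece is
# folded into fairseq's <unk>.
_FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_FAIRSEQ_OFFSET = 1

def _token_to_fairseq_id_sketch(token, sp_piece_to_id):
    if token in _FAIRSEQ_TOKENS_TO_IDS:
        return _FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = sp_piece_to_id(token)
    # spm_id == 0 means SentencePiece did not know the piece -> fairseq <unk>
    return spm_id + _FAIRSEQ_OFFSET if spm_id else _FAIRSEQ_TOKENS_TO_IDS["<unk>"]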
| 49 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'}
_lowercase : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
_lowercase : List[str] = {
'google/rembert': 2_56,
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Union[str, Any] = VOCAB_FILES_NAMES
a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Tuple=True , _lowercase : str=True , _lowercase : str="[CLS]" , _lowercase : Dict="[SEP]" , _lowercase : Union[str, Any]="[UNK]" , _lowercase : Any="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : Tuple="[CLS]" , _lowercase : Optional[Any]="[MASK]" , **_lowercase : str , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = remove_space
__UpperCAmelCase = keep_accents
__UpperCAmelCase = vocab_file
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def a ( self : int ):
return len(self.sp_model )
def a ( self : Tuple ):
__UpperCAmelCase = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
__UpperCAmelCase = self.__dict__.copy()
__UpperCAmelCase = None
return state
def __setstate__( self : Tuple , _lowercase : str ):
__UpperCAmelCase = d
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any]=False ):
__UpperCAmelCase = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def a ( self : int , _lowercase : List[str] ):
return self.sp_model.PieceToId(_lowercase )
def a ( self : List[str] , _lowercase : str ):
return self.sp_model.IdToPiece(_lowercase )
def a ( self : Any , _lowercase : Dict ):
__UpperCAmelCase = self.sp_model.decode_pieces(_lowercase )
return out_string
def a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a ( self : Optional[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowercase ) )
return
__UpperCAmelCase = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
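# Hedged usage sketch of the special-token layout implemented above (toy ids,
# not a real checkpoint): single sequences become [CLS] A [SEP]; pairs become
# [CLS] A [SEP] B [SEP], with token_type_ids 0 over the first segment and 1
# over the second.
def _rembert_layout_sketch():
    cls_id, sep_id = 101, 102  # illustrative placeholder ids
    seq_a, seq_b = [5, 6, 7], [8, 9]
    input_ids = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
    token_type_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)
    assert len(input_ids) == len(token_type_ids)
    return input_ids, token_type_ids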
| 49 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : List[str] = logging.get_logger(__name__)
def lowercase__ ( snake_case_ :List[Any] , snake_case_ :List[str]=False ):
__UpperCAmelCase = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :str=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__UpperCAmelCase = ''''''
else:
__UpperCAmelCase = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__UpperCAmelCase = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
__UpperCAmelCase = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
__UpperCAmelCase = in_proj_bias[: config.hidden_size]
__UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
__UpperCAmelCase = in_proj_bias[-config.hidden_size :]
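# Hedged toy check of the slicing performed above (illustrative sizes only):
# timm stores the attention projections as one fused qkv matrix of shape
# (3 * hidden_size, hidden_size); the three equal row-blocks are, in order,
# query, key and value.
def _demo_qkv_split(hidden_size=4):
    fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
        3 * hidden_size, hidden_size
    )
    query = fused[:hidden_size, :]
    key = fused[hidden_size : hidden_size * 2, :]
    value = fused[-hidden_size:, :]
    assert torch.equal(torch.cat([query, key, value], dim=0), fused)
    return query, key, value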
def lowercase__ ( snake_case_ :str ):
__UpperCAmelCase = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :int , snake_case_ :str ):
__UpperCAmelCase = dct.pop(snake_case_ )
__UpperCAmelCase = val
def lowercase__ ( ):
__UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
return im
@torch.no_grad()
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :Any , snake_case_ :List[str]=False ):
__UpperCAmelCase = BitConfig(
global_padding='''same''' , layer_type='''bottleneck''' , depths=(3, 4, 9) , out_features=['''stage3'''] , embedding_dynamic_padding=snake_case_ , )
__UpperCAmelCase = ViTHybridConfig(backbone_config=snake_case_ , image_size=384 , num_labels=1_000 )
__UpperCAmelCase = False
# load original model from timm
__UpperCAmelCase = timm.create_model(snake_case_ , pretrained=snake_case_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__UpperCAmelCase = timm_model.state_dict()
if base_model:
remove_classification_head_(snake_case_ )
__UpperCAmelCase = create_rename_keys(snake_case_ , snake_case_ )
for src, dest in rename_keys:
rename_key(snake_case_ , snake_case_ , snake_case_ )
read_in_q_k_v(snake_case_ , snake_case_ , snake_case_ )
__UpperCAmelCase = '''huggingface/label-files'''
__UpperCAmelCase = '''imagenet-1k-id2label.json'''
__UpperCAmelCase = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type='''dataset''' ) , '''r''' ) )
__UpperCAmelCase = {int(snake_case_ ): v for k, v in idalabel.items()}
__UpperCAmelCase = idalabel
__UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
__UpperCAmelCase = ViTHybridModel(snake_case_ ).eval()
else:
__UpperCAmelCase = ViTHybridForImageClassification(snake_case_ ).eval()
model.load_state_dict(snake_case_ )
# create image processor
__UpperCAmelCase = create_transform(**resolve_data_config({} , model=snake_case_ ) )
__UpperCAmelCase = transform.transforms
__UpperCAmelCase = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
__UpperCAmelCase = ViTHybridImageProcessor(
do_resize=snake_case_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=snake_case_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
__UpperCAmelCase = prepare_img()
__UpperCAmelCase = transform(snake_case_ ).unsqueeze(0 )
__UpperCAmelCase = processor(snake_case_ , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(snake_case_ , snake_case_ )
# verify logits
with torch.no_grad():
__UpperCAmelCase = model(snake_case_ )
__UpperCAmelCase = outputs.logits
print('''Predicted class:''' , logits.argmax(-1 ).item() )
if base_model:
__UpperCAmelCase = timm_model.forward_features(snake_case_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(snake_case_ , outputs.pooler_output , atol=1E-3 )
else:
__UpperCAmelCase = timm_model(snake_case_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case_ , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case_ )
if push_to_hub:
print(F'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(F'''ybelkada/{vit_name}''' )
processor.push_to_hub(F'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
_lowercase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
_lowercase : List[str] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
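# Hedged example invocation of this conversion script (the script name and
# paths are placeholders), using the arguments defined above:
#
#   python convert_script.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-out \
#       --push_to_hub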
| 49 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
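# Hedged sketch of the pattern above (a simplified stand-in, not the real
# transformers._LazyModule): attribute access triggers the import, so importing
# the package stays cheap until a symbol is first used.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(name)
        # import the owning submodule only now, on first attribute access
        submodule = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
        return getattr(submodule, name)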
| 49 | 1 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : List[Any] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowercase__ ( snake_case_ :Union[str, Any] ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowercase__ ( snake_case_ :int , snake_case_ :Dict ):
if args.student_type == "roberta":
__UpperCAmelCase = False
elif args.student_type == "gpt2":
__UpperCAmelCase = False
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Union[str, Any] ):
if args.student_type == "roberta":
__UpperCAmelCase = False
def lowercase__ ( ):
__UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
        '''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized data file (tokenized + tokens_to_ids), grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the distillation softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
    parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of passes over the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' )
__UpperCAmelCase = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ )
__UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__UpperCAmelCase = special_tok_ids
__UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
__UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__UpperCAmelCase = 0.0 # do not predict special tokens
__UpperCAmelCase = torch.from_numpy(snake_case_ )
else:
__UpperCAmelCase = None
__UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
__UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
__UpperCAmelCase = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__UpperCAmelCase = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
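# Hedged numeric sketch of the token-probability smoothing computed above (toy
# counts; the real ones come from --token_counts): weights follow
# count ** -mlm_smoothing, so rarer tokens are masked more often, and special
# tokens get weight 0 so they are never selected for MLM prediction.
def _demo_mlm_mask_weights(mlm_smoothing=0.7):
    counts = np.array([1_000_000, 10_000, 100, 0])  # last entry: unseen token
    weights = np.maximum(counts, 1) ** -mlm_smoothing
    weights[0] = 0.0  # pretend index 0 is a special token id
    return weights / weights.sum()  # relative masking propensities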
if __name__ == "__main__":
main()
| 49 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : List[Any] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowercase__ ( snake_case_ :Union[str, Any] ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowercase__ ( snake_case_ :int , snake_case_ :Dict ):
if args.student_type == "roberta":
__UpperCAmelCase = False
elif args.student_type == "gpt2":
__UpperCAmelCase = False
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Union[str, Any] ):
if args.student_type == "roberta":
__UpperCAmelCase = False
def lowercase__ ( ):
__UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' )
__UpperCAmelCase = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ )
__UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__UpperCAmelCase = special_tok_ids
__UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
__UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__UpperCAmelCase = 0.0 # do not predict special tokens
__UpperCAmelCase = torch.from_numpy(snake_case_ )
else:
__UpperCAmelCase = None
__UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
__UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
__UpperCAmelCase = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__UpperCAmelCase = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
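# A plausible invocation of this script, for illustration only (paths and model
# choices are hypothetical; the flags match the parser and the `args.*` reads above):
#
#   python train.py \
#     --student_type distilbert --student_config student_config.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --data_file data/binarized.pickle --token_counts data/token_counts.pickle \
#     --dump_path serialization_dir/my_run --force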
| 49 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def a ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a ( self : Dict ):
__UpperCAmelCase = ort.SessionOptions()
__UpperCAmelCase = False
return options
def a ( self : Any ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self : Optional[int] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
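# Note on the provider plumbing above: onnxruntime accepts an execution provider
# either as a bare name or as a (name, options) tuple, which is why `gpu_provider`
# returns a tuple. A minimal stand-alone sketch (model path is hypothetical):
#
#   sess = ort.InferenceSession(
#       "model.onnx",
#       providers=[("CUDAExecutionProvider", {"arena_extend_strategy": "kSameAsRequested"})],
#   )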
| 49 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
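# Illustration of what the lazy structure above buys (behavior sketch, not part of
# the original module): importing the package stays cheap, and the heavy backends
# are only pulled in when a gated attribute is actually touched.
#
#   import transformers.models.fnet as fnet   # no torch import happens yet
#   model_cls = fnet.FNetModel                # triggers the torch-gated import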
| 49 | 1 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
_lowercase : str = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_lowercase : Optional[int] = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_lowercase : str = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def lowercase__ ( snake_case_ :Optional[Any] , snake_case_ :Dict ):
return float((preds == labels).mean() )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str ):
__UpperCAmelCase = simple_accuracy(snake_case_ , snake_case_ )
__UpperCAmelCase = float(fa_score(y_true=snake_case_ , y_pred=snake_case_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowercase__ ( snake_case_ :str , snake_case_ :List[Any] ):
__UpperCAmelCase = np.array(snake_case_ )
__UpperCAmelCase = np.array(snake_case_ )
__UpperCAmelCase = en_sentvecs.shape[0]
# mean centering
__UpperCAmelCase = en_sentvecs - np.mean(snake_case_ , axis=0 )
__UpperCAmelCase = in_sentvecs - np.mean(snake_case_ , axis=0 )
__UpperCAmelCase = cdist(snake_case_ , snake_case_ , '''cosine''' )
__UpperCAmelCase = np.array(range(snake_case_ ) )
__UpperCAmelCase = sim.argsort(axis=1 )[:, :10]
__UpperCAmelCase = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
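# Sanity sketch for the retrieval metric above (values from the docstring example):
# with identical English/Indic sentence vectors, every query's gold index lands in
# its own top-10 neighbors, so the score is 1.0.
#
#   _vecs = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
#   precision_at_aa(_vecs, _vecs)  # -> 1.0 (name as called in the metric class below)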
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def a ( self : Union[str, Any] ):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
def a ( self : Any , _lowercase : List[str] , _lowercase : int ):
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(_lowercase , _lowercase )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(_lowercase , _lowercase )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 49 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowercase : Union[str, Any] = logging.getLogger(__name__)
_lowercase : Optional[Any] = 'Hello world! cécé herlolip'
_lowercase : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowercase__ ( snake_case_ :Any , snake_case_ :int ):
__UpperCAmelCase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , )
__UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage )
__UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ )
original.eval()
__UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
__UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__UpperCAmelCase = encoder_input_ids
__UpperCAmelCase = decoder_input_ids
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process, we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = original.generator(snake_case_ )
__UpperCAmelCase = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = new_model.generator(snake_case_ )
__UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between the models\' outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between the generators\' outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
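# Reloading the exported state dict is then independent of the original repo's
# directory layout (sketch; config construction omitted for brevity):
#
#   model = BertAbsSummarizer(config, torch.device("cpu"))
#   model.load_state_dict(torch.load(
#       "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"))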
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowercase : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 49 | 1 |
"""simple docstring"""
def lowercase__ ( snake_case_ :int , snake_case_ :int ):
while second != 0:
__UpperCAmelCase = first & second
first ^= second
__UpperCAmelCase = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : List[Any] = int(input('Enter the first number: ').strip())
_lowercase : Tuple = int(input('Enter the second number: ').strip())
print(f"""{add(first, second) = }""")
| 49 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def a ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a ( self : Dict ):
__UpperCAmelCase = ort.SessionOptions()
__UpperCAmelCase = False
return options
def a ( self : Any ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self : Optional[int] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 49 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( snake_case_ :int ):
__UpperCAmelCase = [True] * limit
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
__UpperCAmelCase = i * 2
while index < limit:
__UpperCAmelCase = False
__UpperCAmelCase = index + i
__UpperCAmelCase = [2]
for i in range(3 , snake_case_ , 2 ):
if is_prime[i]:
primes.append(snake_case_ )
return primes
def lowercase__ ( snake_case_ :int = 1_000_000 ):
__UpperCAmelCase = prime_sieve(snake_case_ )
__UpperCAmelCase = 0
__UpperCAmelCase = 0
for i in range(len(snake_case_ ) ):
for j in range(i + length , len(snake_case_ ) ):
__UpperCAmelCase = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
__UpperCAmelCase = j - i
__UpperCAmelCase = sol
return largest
if __name__ == "__main__":
print(f"""{solution() = }""")
| 49 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase__ ( snake_case_ :Dict , snake_case_ :int ):
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
if issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = jsonl_path
elif issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ):
assert isinstance(snake_case_ , snake_case_ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ):
if split:
__UpperCAmelCase = {split: jsonl_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowercase__ ( snake_case_ :Optional[int] ):
return json.load(snake_case_ )
def lowercase__ ( snake_case_ :Any ):
return [json.loads(snake_case_ ) for line in buffer]
class _UpperCAmelCase :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
def a ( self : int , _lowercase : Any ):
with pytest.raises(_lowercase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
__UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
assert exported_content == original_content
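# End-to-end shape of the API exercised by these tests (sketch; file names are
# hypothetical):
#
#   dset = JsonDatasetReader("data.jsonl", cache_dir="cache").read()
#   JsonDatasetWriter(dset, "out.jsonl", lines=True).write()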
| 49 | 1 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
_lowercase : Optional[int] = 'src/transformers'
_lowercase : List[Any] = 'docs/source/en/tasks'
def lowercase__ ( snake_case_ :Dict , snake_case_ :Any , snake_case_ :Optional[Any] ):
with open(snake_case_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.readlines()
# Find the start prompt.
__UpperCAmelCase = 0
while not lines[start_index].startswith(snake_case_ ):
start_index += 1
start_index += 1
__UpperCAmelCase = start_index
while not lines[end_index].startswith(snake_case_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
_lowercase : Union[str, Any] = direct_transformers_import(TRANSFORMERS_PATH)
_lowercase : List[Any] = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
_lowercase : int = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def lowercase__ ( snake_case_ :str ):
__UpperCAmelCase = TASK_GUIDE_TO_MODELS[task_guide]
__UpperCAmelCase = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case_ , set() )
__UpperCAmelCase = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any]=False ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = _find_text_in_file(
filename=os.path.join(snake_case_ , snake_case_ ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , )
__UpperCAmelCase = get_model_list_for_task(snake_case_ )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case_ , snake_case_ ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
''' to fix this.''' )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_lowercase : Optional[Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 49 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Union[str, Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
__UpperCAmelCase = TextIteratorStreamer(_lowercase )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowercase , _lowercase )
def a ( self : str ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
__UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Tuple ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase )
model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
__UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a ( self : Tuple ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowercase ):
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
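# Interactive-use sketch matching the iterator pattern exercised above
# (model/tokenizer assumed already loaded; without a timeout, iteration simply
# blocks until generation produces the next chunk):
#
#   streamer = TextIteratorStreamer(tokenizer)
#   Thread(target=model.generate,
#          kwargs={"input_ids": input_ids, "max_new_tokens": 32, "streamer": streamer}).start()
#   for chunk in streamer:
#       print(chunk, end="", flush=True)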
| 49 | 1 |
"""simple docstring"""
from collections import deque
class _UpperCAmelCase :
def __init__( self : List[Any] , _lowercase : str , _lowercase : int , _lowercase : int ):
__UpperCAmelCase = process_name # process name
__UpperCAmelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__UpperCAmelCase = arrival_time
__UpperCAmelCase = burst_time # remaining burst time
        __UpperCAmelCase = 0 # total time the process waits in the ready queue
__UpperCAmelCase = 0 # time from arrival time to completion time
class _UpperCAmelCase :
def __init__( self : List[str] , _lowercase : int , _lowercase : list[int] , _lowercase : deque[Process] , _lowercase : int , ):
# total number of mlfq's queues
__UpperCAmelCase = number_of_queues
# time slice of queues that round robin algorithm applied
__UpperCAmelCase = time_slices
# unfinished process is in this ready_queue
__UpperCAmelCase = queue
# current time
__UpperCAmelCase = current_time
# finished process is in this sequence queue
__UpperCAmelCase = deque()
def a ( self : Dict ):
__UpperCAmelCase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def a ( self : str , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def a ( self : Any , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def a ( self : Tuple , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def a ( self : Optional[int] , _lowercase : deque[Process] ):
return [q.burst_time for q in queue]
def a ( self : str , _lowercase : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def a ( self : Union[str, Any] , _lowercase : deque[Process] ):
__UpperCAmelCase = deque() # sequence deque of finished process
while len(_lowercase ) != 0:
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_lowercase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__UpperCAmelCase = 0
# set the process's turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# set the completion time
__UpperCAmelCase = self.current_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def a ( self : Union[str, Any] , _lowercase : deque[Process] , _lowercase : int ):
__UpperCAmelCase = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_lowercase ) ):
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_lowercase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__UpperCAmelCase = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_lowercase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__UpperCAmelCase = 0
# set the finish time
__UpperCAmelCase = self.current_time
# update the process' turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def a ( self : Union[str, Any] ):
        # all queues except the last one use the round-robin algorithm
for i in range(self.number_of_queues - 1 ):
__UpperCAmelCase , __UpperCAmelCase = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
_lowercase : List[str] = Process('P1', 0, 53)
_lowercase : str = Process('P2', 0, 17)
_lowercase : Union[str, Any] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : Any = 3
_lowercase : Union[str, Any] = [17, 25]
_lowercase : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
_lowercase : Optional[Any] = Process('P1', 0, 53)
_lowercase : Tuple = Process('P2', 0, 17)
_lowercase : Optional[int] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : int = 3
_lowercase : int = [17, 25]
_lowercase : List[str] = deque([Pa, Pa, Pa, Pa])
_lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_lowercase : str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
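# Expected behavior for the workload above (derivable by hand): P2 (burst 17)
# completes inside the first 17-tick round-robin pass, P4 (burst 24) inside the
# 25-tick pass, and P1/P3 drain under FCFS, so the finish sequence should be
# ['P2', 'P4', 'P1', 'P3'].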
| 49 |
"""simple docstring"""
def lowercase__ ( snake_case_ :float , snake_case_ :float ):
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
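# Worked example of the Newton-Laplace formula above (approximate handbook
# values, for illustration): water has bulk modulus ~2.15e9 Pa and density
# ~998 kg/m^3, so (2.15e9 / 998) ** 0.5 ≈ 1468 m/s — close to the commonly
# quoted ~1480 m/s speed of sound in water.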
| 49 | 1 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
class lowerCamelCase_ ( lowerCamelCase ):
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DPTImageProcessor instead.''' , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
| 0 |
"""simple docstring"""
def lowercase__ ( snake_case_ :dict ):
__UpperCAmelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__UpperCAmelCase = set()
return any(
node not in visited and depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
for node in graph )
def lowercase__ ( snake_case_ :dict , snake_case_ :int , snake_case_ :set , snake_case_ :set ):
visited.add(snake_case_ )
rec_stk.add(snake_case_ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(snake_case_ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
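# Self-contained sketch of the same back-edge check with readable names
# (`check_cycle` / `depth_first_search` are the assumed originals behind the
# renamed definitions above):
def check_cycle_sketch(graph: dict) -> bool:
    visited: set = set()
    rec_stk: set = set()

    def dfs(node) -> bool:
        visited.add(node)
        rec_stk.add(node)  # node enters the current recursion stack
        for nxt in graph[node]:
            if nxt not in visited and dfs(nxt):
                return True
            if nxt in rec_stk:  # back edge -> cycle
                return True
        rec_stk.remove(node)  # node leaves the stack once its subtree is done
        return False

    return any(n not in visited and dfs(n) for n in graph)

assert check_cycle_sketch({"a": ["b"], "b": ["c"], "c": ["a"]}) is True
assert check_cycle_sketch({"a": ["b"], "b": ["c"], "c": []}) is False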
| 49 | 0 |
def _A ( _lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
__UpperCamelCase = hex_num[0] == '-'
if is_negative:
__UpperCamelCase = hex_num[1:]
try:
__UpperCamelCase = int(_lowercase , 16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
__UpperCamelCase = ''
while int_num > 0:
__UpperCamelCase = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
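# Usage sketch for the converter above (`_A` is the dump's name for it; the
# function returns the binary digits packed into a plain int):
assert _A("AF") == 10101111   # 0xAF == 0b10101111
assert _A("-66") == -1100110  # the sign is preserved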
| 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ['PoolFormerFeatureExtractor']
_lowercase : Any = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 49 | 0 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
UpperCAmelCase_ = False
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = """ybelkada/fonts"""
def SCREAMING_SNAKE_CASE_ ( ) -> int:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width, )
    return patches.unsqueeze(0)
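# Shape sanity check (assumed toy sizes, not part of the original file): a 3x32x32
# image with 16x16 patches gives
#   torch_extract_patches(torch.rand(3, 32, 32), 16, 16).shape
#   == torch.Size([1, 2, 2, 768])   # 1 x rows x cols x (C * ph * pw)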
def render_text(text: str, text_size: int = 36, text_color: str = "black", background_color: str = "white", left_padding: int = 5, right_padding: int = 5, top_padding: int = 5, bottom_padding: int = 5, font_bytes: Optional[bytes] = None, font_path: Optional[str] = None) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    """Constructs a Pix2Struct image processor."""

    model_input_names = ["flattened_patches"]

    def __init__(self, do_convert_rgb: bool = True, do_normalize: bool = True, patch_size: Dict[str, int] = None, max_patches: int = 2048, is_vqa: bool = False, **kwargs) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False, antialias=True, ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(self, images: ImageInput, header_text: Optional[str] = None, do_convert_rgb: bool = None, do_normalize: Optional[bool] = None, max_patches: Optional[int] = None, patch_size: Optional[Dict[str, int]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
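# Minimal usage sketch (toy sizes assumed; "doc.png" is a placeholder path):
#   processor = Pix2StructImageProcessor(max_patches=2048)
#   batch = processor(images=Image.open("doc.png"), return_tensors="np")
#   batch["flattened_patches"].shape  # (1, 2048, 2 + 16 * 16 * 3) == (1, 2048, 770)
# The leading 2 features per patch are the 1-based row/column ids used as
# positional information.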
| 2 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
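# For the adjacency list above the articulation points printed are 2, 3 and 5:
# removing 2 splits off {0, 1}, removing 3 isolates 4, and removing 5
# disconnects {6, 7, 8} from the rest of the graph.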
| 49 | 0 |
'''simple docstring'''
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('aer_simulator')

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f"""Total count for various states are: {quantum_entanglement(3)}""")
| 3 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """Wraps an EnCodec feature extractor and a T5 tokenizer into a single processor."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
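# Hedged usage sketch (the checkpoint name is an assumption, not part of this file):
#   processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
#   inputs = processor(text=["80s pop track with bassy drums"], padding=True, return_tensors="pt")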
| 49 | 0 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
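# Examples: quadratic_roots(1, -2, 1) -> (1.0, 1.0); with a negative discriminant,
# quadratic_roots(1, 0, 1) returns the complex pair (1j, -1j).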
def main():
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")
if __name__ == "__main__":
main()
| 4 |
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
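# Example: abbr("daBcd", "ABC") is True (capitalize 'a' and 'c' and drop the
# lowercase 'd's to obtain "ABC"), while abbr("dBcd", "ABC") is False.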
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 5 |
"""simple docstring"""
from __future__ import annotations

from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slices of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue

        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
import doctest
_lowercase : List[str] = Process('P1', 0, 53)
_lowercase : str = Process('P2', 0, 17)
_lowercase : Union[str, Any] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : Any = 3
_lowercase : Union[str, Any] = [17, 25]
_lowercase : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
_lowercase : Optional[Any] = Process('P1', 0, 53)
_lowercase : Tuple = Process('P2', 0, 17)
_lowercase : Optional[int] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : int = 3
_lowercase : int = [17, 25]
_lowercase : List[str] = deque([Pa, Pa, Pa, Pa])
_lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_lowercase : str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"""waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"""completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"""turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print sequence of finished processes
    print(
        f"""sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}"""
    )
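# Worked expectation for this workload (slices 17 and 25, then FCFS): finish
# order P2, P4, P1, P3; waiting times [83, 17, 94, 101]; completion times
# [136, 34, 162, 125] (turnaround equals completion since all arrive at t=0).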
| 49 | 0 |
def exchange_sort(numbers: list[int]) -> list[int]:
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
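# Example: exchange_sort([5, 4, 3, 2, 1]) -> [1, 2, 3, 4, 5] (sorts in place).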
if __name__ == "__main__":
_lowerCamelCase = input('Enter numbers separated by a comma:\n').strip()
_lowerCamelCase = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 6 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
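# e.g. CamembertConfig() reproduces the camembert-base defaults above; pass
# overrides such as CamembertConfig(num_hidden_layers=6) for a smaller variant.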
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 49 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the subcommand usage fragment from the help message."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 7 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)
def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)
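# Example: rec_insertion_sort([3, 1, 2], 3) rearranges the list in place to
# [1, 2, 3] (each recursion level bubbles one element into position via insert_next).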
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 49 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
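# e.g. TrOCRConfig() matches the microsoft/trocr-base-handwritten decoder; TrOCR
# models typically pair this decoder with a ViT-style encoder inside a
# VisionEncoderDecoderModel.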
| 8 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2", )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )

        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 49 | 0 |
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
f' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(f'Total reward: {total_reward}')
| 9 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result
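# Classic toy example: states ["Healthy", "Fever"], observations
# ["normal", "cold", "dizzy"], with the usual textbook initial/transition/emission
# probabilities. The most likely hidden-state path returned is
# ["Healthy", "Healthy", "Fever"].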
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)
def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")
def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)
def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49 | 0 |
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1 )

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
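# Worked example: for A = [[4, 1], [1, 3]] (strictly diagonally dominant),
# b = [[1], [2]], init_val = [0.5, -0.5], the iterates converge toward the exact
# solution x = [1/11, 7/11] ~= [0.0909, 0.6364] as the iteration count grows.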
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'yjernite/retribert-base-uncased': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 49 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], num_groups=1, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1), msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
                    self.assertTrue(
                        torch.all(module.bias == 0), msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def a__ (self ) -> int:
"""simple docstring"""
pass
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def a__ (self ) -> int:
"""simple docstring"""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class BitModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ) -> str:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def a__ (self ) -> int:
"""simple docstring"""
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
@require_torch
class BitBackboneTest ( BackboneTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
def a__ (self ) -> Dict:
"""simple docstring"""
        self.model_tester = BitModelTester(self)
| 11 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''')
        qar_model = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''').to('''cuda:0''')
        qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''')
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''').to('''cuda:0''')
        save_dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''')
        sas_model.load_state_dict(save_dict['''model'''])
        sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name='''t5-small''', from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''', device='''cuda:0''')
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='''wiki_snippets''', name='''wiki40b_en_100_0''')['''train''']
        wiki40b_passage_reps = np.memmap(
            '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''', dtype='''float32''', mode='''r''', shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('''eli5''', name='''LFQA_reddit''')
    eli5_train = eli5['''train_eli5''']
    eli5_train_q_reps = np.memmap(
        '''eli5_questions_reps.dat''', dtype='''float32''', mode='''r''', shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    # FAISS search returns (distances D, indices I); I[0] holds the nearest ids
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (''' <P> '''.join(['''''' for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results)
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name='''english_wiki40b_snippets_100w''', n_results=n_results, )
    support_list = [
        (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
    ]
    question_doc = '''question: {} context: {}'''.format(question, support_doc)
    return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None),
} )
def answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1_024, device='''cuda:0''', )[0]
    # support_list comes from the enclosing script scope (it is set before this is called)
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
header_html = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
header_full = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
demo_options = st.sidebar.checkbox('Demo options')
if demo_options:
    action_st = st.sidebar.selectbox(
        '',
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        '',
        ['Show full text of passages', 'Show passage section titles'],
        index=0,
    )
    show_passages = show_type == 'Show full text of passages'
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    retriever_info = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
    index_type = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
    wiki_source = 'wiki40b'
    index_type = 'dense'
sampled = 'beam'
n_beams = 2
min_len = 64
max_len = 2_56
top_p = None
temp = None
generate_options = st.sidebar.checkbox('Generation options')
if generate_options:
    generate_info = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
    min_len = st.sidebar.slider(
        'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('Enter your question here:', '')
else:
    question = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method='dense', n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method='sparse', n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
            wiki_url = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = '[{}]({})'.format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(' & ')
                sections = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
        answers_st = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 49 | 0 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    '''simple docstring'''
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
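# Illustrative (not part of the original sample):
# list(chunker("ABCDE", 2)) == [("A", "B"), ("C", "D"), ("E",)]
# -- the final chunk may be shorter than `size`.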
def prepare_input(dirty: str) -> str:
    '''simple docstring'''
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"  # split doubled letters, per Playfair rules
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"  # pad to an even length
    return clean
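# Illustrative (not part of the original sample):
# prepare_input("hello") == "HELXLO" -- the doubled "L" is split with an "X".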
def generate_table(key: str) -> list[str]:
    '''simple docstring'''
    # the 25-letter alphabet drops "J" (classically merged with "I")
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    '''simple docstring'''
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            # same row: take the letter to the right of each (wrapping around)
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # same column: take the letter below each (wrapping around)
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: each letter is replaced by the one in its own row
            ciphertext += table[row1 * 5 + col2]  # but the other letter's column
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    '''simple docstring'''
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            # same row: take the letter to the left of each (wrapping around)
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            # same column: take the letter above each (wrapping around)
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle: swap the columns back
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
return plaintext
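# Minimal round-trip sketch (added for illustration; the key and message are
# arbitrary examples, not part of the original sample):
if __name__ == "__main__":
    ct = encode("meet me at the docks", "monarchy")
    print(ct)
    # decode returns the prepared plaintext, X-padding included
    print(decode(ct, "monarchy"))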
| 12 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule='''scaled_linear''', num_train_timesteps=10_00, clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''An astronaut riding an elephant''',
            '''source_prompt''': '''An astronaut riding a horse''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''eta''': 0.1,
            '''strength''': 0.8,
            '''guidance_scale''': 3,
            '''source_guidance_scale''': 1,
            '''output_type''': '''numpy''',
        }
        return inputs
def a ( self : Optional[int] ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a ( self : Optional[int] ):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, '''half'''):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@skip_mps
def a ( self : Tuple ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def a ( self : List[str] ):
return super().test_inference_batch_single_identical()
@skip_mps
def a ( self : int ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self : int ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests ( unittest.TestCase ):
def a ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''')
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''')
        init_image = init_image.resize((5_12, 5_12))
        model_id = '''CompVis/stable-diffusion-v1-4'''
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder='''scheduler''')
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision='''fp16''')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = '''A black colored car'''
        source_prompt = '''A blue colored car'''
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=1_00, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type='''np''', )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5E-1
def a ( self : Optional[Any] ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''')
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''')
        init_image = init_image.resize((5_12, 5_12))
        model_id = '''CompVis/stable-diffusion-v1-4'''
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder='''scheduler''')
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = '''A black colored car'''
        source_prompt = '''A blue colored car'''
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=1_00, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type='''np''', )
        image = output.images
        assert np.abs(image - expected_image).max() < 2E-2
| 49 | 0 |
'''simple docstring'''
from functools import reduce
A__ : Union[str, Any] = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = A__) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12))
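# For intuition (illustrative, not part of the original solution):
# reduce(lambda x, y: str(int(x) * int(y)), "9989") == "5832",
# i.e. 9 * 9 * 8 * 9, the digit product of one 4-character window.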
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/rembert': 2_56,
}
class RemBertTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=True, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
return state
def __setstate__( self : Tuple , _lowercase : str ):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 49 | 0 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    """simple docstring"""
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = R'''.*/layers_(\d+)'''
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(R'''layers_(\d+)''', R'''block/\1/layer''', new_key)
        layer_to_block_of_layer = R'''(encoder|decoder)\/'''
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R'''/mlp/''', R'''/1/mlp/''', new_key)
                new_key = re.sub(R'''/pre_mlp_layer_norm/''', R'''/1/layer_norm/''', new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(R'''/mlp/''', R'''/2/mlp/''', new_key)
                new_key = re.sub(R'''/pre_mlp_layer_norm/''', R'''/2/layer_norm/''', new_key)
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)
        print(F"""{key} -> {new_key}""")
        s_dict[new_key] = s_dict.pop(key)
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_a : Optional[int] = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_a : str = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace('''expert/''', F"""experts/expert_{idx}/""")] = expert_weihts[idx]
                print(F"""{key} -> {key.replace('expert/' ,'nested fstring' )}""")
            s_dict.pop(key)
return s_dict
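# Illustrative mapping (not part of the original script): a T5X key such as
# "encoder/layers_0/attention/key/kernel" is renamed by the rules above to
# "encoder/block/0/layer/0/SelfAttention/k/kernel".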
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    """simple docstring"""
    import regex as re
    with open(gin_file, '''r''') as f:
        raw_gin = f.read()
    regex_match = re.findall(R'''(.*) = ([0-9.]*)''', raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if '''.''' in value else int(value)
    activation = re.findall(R'''(.*activations) = \(\'(.*)\',\)''', raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args['''num_experts'''] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8):
    """simple docstring"""
    print(F"""Loading flax weights from : {flax_checkpoint_path}""")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params['''target''']
    flax_params = flatten_dict(flax_params, sep='''/''')
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep='''/''')
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
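# Illustrative CLI invocation (script name and paths are placeholders):
# python convert_switch_transformers.py \
#     --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --config_name google/switch-base-8 \
#     --pytorch_dump_folder_path ./switch-base-8-pt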
| 14 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
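# _LazyModule defers the heavy torch/vision imports above until one of the
# exported attributes (e.g. VivitModel) is first accessed.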
| 49 | 0 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    """simple docstring"""
    require_version(deps[pkg], hint)
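# Example (illustrative): dep_version_check("tqdm") raises if the installed
# tqdm does not satisfy the version pinned in dependency_versions_table.py.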
| 15 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description='''Training''')
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' )
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
            json.dump(vars(args), f, indent=4)
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(F'''Special tokens {special_tok_ids}''')
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
        data = pickle.load(fp)
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
            counts = pickle.load(fp)
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''')
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher)
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 49 | 0 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester :
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, num_channels=3, is_training=True, use_labels=True, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, image_size=224, num_labels=1000, layer_depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4, downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels, down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-5, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def _snake_case ( self : Optional[int] ):
pass
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _snake_case ( self : Tuple ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def _snake_case ( self : Union[str, Any] ):
pass
def _snake_case ( self : Optional[Any] ):
def check_hidden_states_output(__lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = 8
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(__lowerCamelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
def _config_zero_init(__lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = copy.deepcopy(__lowerCamelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(__lowerCamelCase , __lowerCamelCase , 1e-10 )
if isinstance(getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = _config_zero_init(getattr(__lowerCamelCase , __lowerCamelCase ) )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return configs_no_init
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : str ):
pass
def __a ( ):
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[str] ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
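As a sanity check on the hidden-states test above, the following standalone sketch reproduces the expected shape arithmetic, assuming the tester defaults (image_size=224, embed_dims=[48, 56, 112, 220]); the variable names are illustrative only.

# Spatial resolution starts at image_size // 4 and halves after every 2 blocks.
image_size, embed_dims = 224, [48, 56, 112, 220]
for i in range(8):
    side = (image_size // 4) // 2 ** (i // 2)
    print(i, (embed_dims[i // 2], side, side))  # e.g. i=0 -> (48, 56, 56), i=7 -> (220, 7, 7)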
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
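The `_LazyModule` call above defers the heavy framework imports until an attribute is actually accessed. A minimal sketch of the underlying idea, using the module-level `__getattr__` hook from PEP 562 (Python 3.7+) rather than the actual transformers implementation:

import importlib

_import_structure = {"configuration_fnet": ["FNetConfig"]}  # illustrative subset

def __getattr__(name):
    for submodule, attrs in _import_structure.items():
        if name in attrs:
            # Import the submodule only on first access to one of its attributes.
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")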
| 49 | 0 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCamelCase_ ( _lowercase ):
_lowercase : Union[str, Any] = '''EncodecFeatureExtractor'''
_lowercase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[Any] , __A : Any , __A : Tuple ):
super().__init__(__A , __A )
__A : Dict = self.feature_extractor
__A : List[str] = False
def lowerCAmelCase_ ( self : Union[str, Any] , __A : str=None , __A : Tuple=None , __A : Dict=True ):
return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A )
def __call__( self : Optional[Any] , *__A : Tuple , **__A : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
__A : str = kwargs.pop("""audio""" , __A )
__A : Optional[Any] = kwargs.pop("""sampling_rate""" , __A )
__A : int = kwargs.pop("""text""" , __A )
if len(__A ) > 0:
__A : int = args[0]
__A : Dict = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
__A : Dict = self.tokenizer(__A , **__A )
if audio is not None:
__A : Optional[int] = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__A : List[Any] = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
__A : int = audio_inputs["""padding_mask"""]
return inputs
def lowerCAmelCase_ ( self : List[str] , *__A : int , **__A : Tuple ):
__A : Optional[int] = kwargs.pop("""audio""" , __A )
__A : List[str] = kwargs.pop("""padding_mask""" , __A )
if len(__A ) > 0:
__A : Dict = args[0]
__A : Optional[int] = args[1:]
if audio_values is not None:
return self._decode_audio(__A , padding_mask=__A )
else:
return self.tokenizer.batch_decode(*__A , **__A )
def lowerCAmelCase_ ( self : Optional[Any] , *__A : Dict , **__A : Any ):
return self.tokenizer.decode(*__A , **__A )
def lowerCAmelCase_ ( self : Tuple , __A : Union[str, Any] , __A : Optional = None ):
__A : List[str] = to_numpy(__A )
__A , __A , __A : Tuple = audio_values.shape
if padding_mask is None:
return list(__A )
__A : Union[str, Any] = to_numpy(__A )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__A : List[str] = seq_len - padding_mask.shape[-1]
__A : Tuple = 1 - self.feature_extractor.padding_value
__A : Optional[int] = np.pad(__A , ((0, 0), (0, difference)) , """constant""" , constant_values=__A )
__A : int = audio_values.tolist()
for i in range(__A ):
__A : str = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__A : List[Any] = sliced_audio.reshape(__A , -1 )
return audio_values
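To make the padding-mask trick in `_decode_audio` above concrete, here is a small numpy-only sketch with hypothetical values; it pads the mask with the non-padding token so newly generated steps survive, then drops only the genuinely padded columns.

import numpy as np

padding_value = 0.0                                # hypothetical feature-extractor padding value
audio_values = np.arange(8.0).reshape(1, 2, 4)     # (batch, channels, seq_len)
padding_mask = np.array([[1.0, 1.0, 0.0]])         # one step shorter than seq_len; last real step is padding

# Extend the mask with 1 - padding_value so the extra generated step is kept.
difference = audio_values.shape[-1] - padding_mask.shape[-1]
padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=1 - padding_value)

sliced = audio_values[0][:, padding_mask[0] != padding_value]  # shape (2, 3): the padded column is removed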
| 17 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowercase : Union[str, Any] = logging.getLogger(__name__)
_lowercase : Optional[Any] = 'Hello world! cécé herlolip'
_lowercase : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowercase__ ( snake_case_ :Any , snake_case_ :int ):
__UpperCAmelCase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , )
__UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage )
__UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ )
original.eval()
__UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
__UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__UpperCAmelCase = encoder_input_ids
__UpperCAmelCase = decoder_input_ids
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = original.generator(snake_case_ )
__UpperCAmelCase = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = new_model.generator(snake_case_ )
__UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between model outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowercase : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 49 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def a ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a ( self : Dict ):
__UpperCAmelCase = ort.SessionOptions()
__UpperCAmelCase = False
return options
def a ( self : Any ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self : Optional[int] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 49 | 0 |
"""simple docstring"""
_a = 8.314_4598
def lowerCamelCase__ ( __snake_case, __snake_case ) -> float:
"""simple docstring"""
if temperature < 0:
raise Exception('''Temperature cannot be less than 0 K''' )
if molar_mass <= 0:
raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_a = 300
_a = 28
_a = rms_speed_of_molecule(temperature, molar_mass)
print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 19 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase__ ( snake_case_ :Dict , snake_case_ :int ):
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
if issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = jsonl_path
elif issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ):
assert isinstance(snake_case_ , snake_case_ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ):
if split:
__UpperCAmelCase = {split: jsonl_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowercase__ ( snake_case_ :Optional[int] ):
return json.load(snake_case_ )
def lowercase__ ( snake_case_ :Any ):
    return [json.loads(line ) for line in buffer]
class _UpperCAmelCase :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
def a ( self : int , _lowercase : Any ):
with pytest.raises(_lowercase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
__UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
assert exported_content == original_content
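The tests above exercise `JsonDatasetReader` and `JsonDatasetWriter` in isolation; a minimal end-to-end sketch of the same round trip, assuming a writable working directory:

from datasets import Dataset
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

ds = Dataset.from_dict({"col_1": ["a"], "col_2": [1], "col_3": [1.0]})
JsonDatasetWriter(ds, "roundtrip.jsonl", lines=True).write()
restored = JsonDatasetReader("roundtrip.jsonl").read()
assert restored.column_names == ["col_1", "col_2", "col_3"]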
| 49 | 0 |