| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
'''simple docstring'''
import d4rl # noqa -- registers the D4RL locomotion environments (e.g. hopper-medium-v2) with gym
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
lowerCAmelCase : List[str] = {
"""n_samples""": 64,
"""horizon""": 32,
"""num_inference_steps""": 20,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] = """hopper-medium-v2"""
lowerCAmelCase : Union[str, Any] = gym.make(env_name)
lowerCAmelCase : int = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
lowerCAmelCase : List[str] = env.reset()
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : str = 0
lowerCAmelCase : Dict = 10_00
lowerCAmelCase : int = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
lowerCAmelCase : Tuple = pipeline(obs, planning_horizon=32)
# execute action in environment
lowerCAmelCase : str = env.step(denorm_actions)
lowerCAmelCase : Optional[Any] = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
lowerCAmelCase : Optional[Any] = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 355 |
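For readability, here is a de-obfuscated sketch of the rollout row above. Variable names are reconstructed from the call sites visible in the row, and the old four-tuple gym step API is assumed; this is a sketch, not the row itself.

```python
import d4rl  # noqa: F401 -- registers hopper-medium-v2 with gym
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline

env = gym.make("hopper-medium-v2")
pipeline = ValueGuidedRLPipeline.from_pretrained(
    "bglick13/hopper-medium-v2-value-function-hor32", env=env
)

env.seed(0)
obs = env.reset()
total_reward, total_score = 0, 0
rollout = [obs.copy()]

for t in tqdm.tqdm(range(1000)):
    # plan a denormalized action with the value-guided diffusion policy
    denorm_actions = pipeline(obs, planning_horizon=32)
    # old-style gym API: step returns (obs, reward, done, info)
    next_observation, reward, terminal, _ = env.step(denorm_actions)
    score = env.get_normalized_score(total_reward)
    total_reward += reward
    total_score += score
    rollout.append(next_observation.copy())  # saved for rendering
    obs = next_observation

print(f"Total reward: {total_reward}")
```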
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "mvp"
__magic_name__ = ["past_key_values"]
__magic_name__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , snake_case__=5_0267 , snake_case__=1024 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=0.0 , snake_case__=0.0 , snake_case__="gelu" , snake_case__=1024 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=0.0 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=True , snake_case__=2 , snake_case__=2 , snake_case__=False , snake_case__=100 , snake_case__=800 , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = max_position_embeddings
_lowerCAmelCase : Optional[Any] = d_model
_lowerCAmelCase : Optional[int] = encoder_ffn_dim
_lowerCAmelCase : Optional[int] = encoder_layers
_lowerCAmelCase : Any = encoder_attention_heads
_lowerCAmelCase : Any = decoder_ffn_dim
_lowerCAmelCase : Optional[Any] = decoder_layers
_lowerCAmelCase : int = decoder_attention_heads
_lowerCAmelCase : Union[str, Any] = dropout
_lowerCAmelCase : List[Any] = attention_dropout
_lowerCAmelCase : List[str] = activation_dropout
_lowerCAmelCase : Optional[Any] = activation_function
_lowerCAmelCase : Any = init_std
_lowerCAmelCase : Any = encoder_layerdrop
_lowerCAmelCase : Union[str, Any] = decoder_layerdrop
_lowerCAmelCase : Optional[int] = classifier_dropout
_lowerCAmelCase : List[Any] = use_cache
_lowerCAmelCase : Optional[int] = encoder_layers
_lowerCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCAmelCase : Optional[Any] = use_prompt
_lowerCAmelCase : Optional[Any] = prompt_length
_lowerCAmelCase : Any = prompt_mid_dim
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , snake_case__ ):
_lowerCAmelCase : Any = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'The config can simply be saved and uploaded again to be fixed.' )
| 25 | 0 |
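The row above obfuscates the MVP model configuration from `transformers`. A minimal usage sketch; the class name `MvpConfig` comes from the public `transformers` API rather than from the row itself:

```python
from transformers import MvpConfig

# defaults mirror the __init__ signature above: vocab_size=50267, d_model=1024, ...
config = MvpConfig()
print(config.vocab_size, config.d_model, config.encoder_layers)  # 50267 1024 12

# the attribute_map in the row aliases these names onto the encoder fields
assert config.num_attention_heads == config.encoder_attention_heads
assert config.hidden_size == config.d_model
```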
'''simple docstring'''
from __future__ import annotations
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = order
# a_{0} ... a_{k}
_lowerCAmelCase : List[Any] = [1.0] + [0.0] * order
# b_{0} ... b_{k}
_lowerCAmelCase : str = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
_lowerCAmelCase : List[Any] = [0.0] * self.order
# y[n-1] ... y[n-k]
_lowerCAmelCase : Optional[int] = [0.0] * self.order
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
if len(snake_case__ ) < self.order:
_lowerCAmelCase : str = [1.0, *a_coeffs]
if len(snake_case__ ) != self.order + 1:
_lowerCAmelCase : Dict = (
F'Expected a_coeffs to have {self.order + 1} elements '
F'for {self.order}-order filter, got {len(snake_case__ )}'
)
raise ValueError(snake_case__ )
if len(snake_case__ ) != self.order + 1:
_lowerCAmelCase : Tuple = (
F'Expected b_coeffs to have {self.order + 1} elements '
F'for {self.order}-order filter, got {len(snake_case__ )}'
)
raise ValueError(snake_case__ )
_lowerCAmelCase : Optional[Any] = a_coeffs
_lowerCAmelCase : Union[str, Any] = b_coeffs
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
_lowerCAmelCase : Optional[int] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
_lowerCAmelCase : int = self.input_history[:-1]
_lowerCAmelCase : Optional[Any] = self.output_history[:-1]
_lowerCAmelCase : Tuple = sample
_lowerCAmelCase : str = result
return result
| 356 |
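The class above is an N-order direct-form IIR filter whose output follows y[n] = (b0·x[n] + Σ(b_i·x[n−i] − a_i·y[n−i])) / a0. A readable sketch with reconstructed names (`a_coeffs`, `b_coeffs`, and the history buffers are inferred from the comments and call sites in the row):

```python
class IIRFilter:
    """N-order IIR filter: y[n] = (b0*x[n] + sum_i(b_i*x[n-i] - a_i*y[n-i])) / a0."""

    def __init__(self, order: int) -> None:
        self.order = order
        self.a_coeffs = [1.0] + [0.0] * order   # a_0 ... a_k (feedback)
        self.b_coeffs = [1.0] + [0.0] * order   # b_0 ... b_k (feedforward)
        self.input_history = [0.0] * order      # x[n-1] ... x[n-k]
        self.output_history = [0.0] * order     # y[n-1] ... y[n-k]

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]         # default a_0 = 1.0 if omitted
        if len(a_coeffs) != self.order + 1 or len(b_coeffs) != self.order + 1:
            raise ValueError(f"expected {self.order + 1} coefficients")
        self.a_coeffs, self.b_coeffs = a_coeffs, b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # shift the delay lines and store the newest input and output
        self.input_history = [sample] + self.input_history[:-1]
        self.output_history = [result] + self.output_history[:-1]
        return result

# identity filter: with default coefficients the output equals the input
f = IIRFilter(2)
assert f.process(0.5) == 0.5
```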
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = {
"""7B""": 1_10_08,
"""13B""": 1_38_24,
"""30B""": 1_79_20,
"""65B""": 2_20_16,
"""70B""": 2_86_72,
}
lowerCAmelCase : Optional[int] = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def lowercase (_A , _A=1 , _A=2_5_6 ):
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def lowercase (_A ):
"""simple docstring"""
with open(_A , 'r' ) as f:
return json.load(_A )
def lowercase (_A , _A ):
"""simple docstring"""
with open(_A , 'w' ) as f:
json.dump(_A , _A )
def lowercase (_A , _A , _A , _A=True ):
"""simple docstring"""
os.makedirs(_A , exist_ok=_A )
_lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' )
os.makedirs(_A , exist_ok=_A )
_lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) )
_lowerCAmelCase : List[str] = NUM_SHARDS[model_size]
_lowerCAmelCase : str = params['n_layers']
_lowerCAmelCase : Optional[int] = params['n_heads']
_lowerCAmelCase : int = n_heads // num_shards
_lowerCAmelCase : Optional[int] = params['dim']
_lowerCAmelCase : Union[str, Any] = dim // n_heads
_lowerCAmelCase : Union[str, Any] = 10_000.0
_lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA
_lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads
_lowerCAmelCase : Optional[int] = dim // num_key_value_heads
else: # compatibility with other checkpoints
_lowerCAmelCase : Union[str, Any] = n_heads
_lowerCAmelCase : Any = n_heads_per_shard
_lowerCAmelCase : Optional[Any] = dim
# permute for sliced rotary
def permute(_A , _A=n_heads , _A=dim , _A=dim ):
return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A )
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
_lowerCAmelCase : List[Any] = [
torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' )
for i in range(_A )
]
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Union[str, Any] = {'weight_map': {}}
for layer_i in range(_A ):
_lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
_lowerCAmelCase : List[str] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) )
_lowerCAmelCase : Optional[int] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , )
_lowerCAmelCase : Dict = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A )
_lowerCAmelCase : Dict = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : Tuple = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : int = inv_freq
for k, v in state_dict.items():
_lowerCAmelCase : Optional[Any] = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
_lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : List[str] = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
_lowerCAmelCase : List[str] = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ),
}
for k, v in state_dict.items():
_lowerCAmelCase : int = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
# Write configs
_lowerCAmelCase : Tuple = {'total_size': param_count * 2}
write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) )
_lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
_lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6
_lowerCAmelCase : List[Any] = LlamaConfig(
hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , )
config.save_pretrained(_A )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
_lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(_A , safe_serialization=_A )
shutil.rmtree(_A )
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
_lowerCAmelCase : List[Any] = tokenizer_class(_A )
tokenizer.save_pretrained(_A )
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' )
_lowerCAmelCase : Any = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
_lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' )
write_tokenizer(args.output_dir , _A )
if __name__ == "__main__":
main()
| 25 | 0 |
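The first helper in the row above (`def lowercase (_A , _A=1 , _A=2_5_6 )`) is LLaMA's intermediate-size computation. A readable sketch with a sanity check against the 7B entry of the first size table (dim 4096 → 11008):

```python
def compute_intermediate_size(n: int, ffn_dim_multiplier: float = 1, multiple_of: int = 256) -> int:
    # LLaMA's MLP width is ~8/3 * hidden_size, rounded up to a multiple of `multiple_of`
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)

# int(8 * 4096 / 3) = 10922, rounded up to the next multiple of 256 = 11008,
# matching the "7B" entry in the size table at the top of the row
assert compute_intermediate_size(4096) == 11008
```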
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = ["image_processor", "tokenizer"]
__magic_name__ = "LayoutLMv2ImageProcessor"
__magic_name__ = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , snake_case__ , )
_lowerCAmelCase : Union[str, Any] = kwargs.pop('feature_extractor' )
_lowerCAmelCase : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(snake_case__ , snake_case__ )
def __call__( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
_lowerCAmelCase : Union[str, Any] = self.image_processor(images=snake_case__ , return_tensors=snake_case__ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase : Any = [text] # add batch dimension (as the image processor always adds a batch dimension)
_lowerCAmelCase : Dict = features['words']
_lowerCAmelCase : str = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
# add pixel values
_lowerCAmelCase : Optional[Any] = features.pop('pixel_values' )
if return_overflowing_tokens is True:
_lowerCAmelCase : Union[str, Any] = self.get_overflowing_images(snake_case__ , encoded_inputs['overflow_to_sample_mapping'] )
_lowerCAmelCase : int = images
return encoded_inputs
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
F' {len(snake_case__ )} and {len(snake_case__ )}' )
return images_with_overflow
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def a ( self ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def a ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case__ , )
return self.image_processor_class
@property
def a ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case__ , )
return self.image_processor
| 357 |
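The row above is the LayoutXLM processor, which chains a `LayoutLMv2ImageProcessor` (OCR via Tesseract) with a LayoutXLM tokenizer. A minimal usage sketch; the checkpoint name `microsoft/layoutxlm-base` is the public hub checkpoint and `document.png` is a placeholder path, neither comes from the row:

```python
from PIL import Image
from transformers import LayoutXLMProcessor

# requires Tesseract installed locally, since apply_ocr defaults to True
processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")

image = Image.open("document.png").convert("RGB")
encoding = processor(image, return_tensors="pt")
# OCR'd words/boxes are tokenized; resized pixel values come back under "image"
print(encoding.keys())  # input_ids, bbox, attention_mask, image
```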
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = 1
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
def a ( self ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(snake_case__ ) for k, v in self.__dict__.items()} )
| 25 | 0 |
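The small dataclass above exposes one method that rebuilds the instance from a deep copy of its own `__dict__`. A standalone sketch of the same clone pattern; the class and field names here are illustrative, since the row's are obfuscated:

```python
import copy
from dataclasses import dataclass, field
from typing import Optional

@dataclass
class CloneableConfig:
    name: Optional[str] = None
    use_auth: bool = False
    max_retries: int = 1
    storage_options: dict = field(default_factory=dict)

    def copy(self) -> "CloneableConfig":
        # rebuild from a deep copy of __dict__ so mutable fields are not shared
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

a = CloneableConfig(storage_options={"token": "x"})
b = a.copy()
b.storage_options["token"] = "y"
assert a.storage_options["token"] == "x"  # the clone owns its own dict
```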
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def lowercase (_A , _A , _A ):
"""simple docstring"""
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def lowercase (_A , _A , _A = None ):
"""simple docstring"""
_lowerCAmelCase : Any = tesseract_config if tesseract_config is not None else ''
# apply OCR
_lowerCAmelCase : Union[str, Any] = to_pil_image(_A )
_lowerCAmelCase : Tuple = pil_image.size
_lowerCAmelCase : int = pytesseract.image_to_data(_A , lang=_A , output_type='dict' , config=_A )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
_lowerCAmelCase : List[Any] = [idx for idx, word in enumerate(_A ) if not word.strip()]
_lowerCAmelCase : int = [word for idx, word in enumerate(_A ) if idx not in irrelevant_indices]
_lowerCAmelCase : Any = [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices]
_lowerCAmelCase : Dict = [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices]
_lowerCAmelCase : str = [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices]
_lowerCAmelCase : Union[str, Any] = [coord for idx, coord in enumerate(_A ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_lowerCAmelCase : Optional[Any] = []
for x, y, w, h in zip(_A , _A , _A , _A ):
_lowerCAmelCase : int = [x, y, x + w, y + h]
actual_boxes.append(_A )
# finally, normalize the bounding boxes
_lowerCAmelCase : Union[str, Any] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_A , _A , _A ) )
assert len(_A ) == len(_A ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = ["pixel_values"]
def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = True , snake_case__ = None , snake_case__ = "" , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Dict = size if size is not None else {'height': 224, 'width': 224}
_lowerCAmelCase : List[str] = get_size_dict(snake_case__ )
_lowerCAmelCase : Any = do_resize
_lowerCAmelCase : Optional[Any] = size
_lowerCAmelCase : Optional[int] = resample
_lowerCAmelCase : int = apply_ocr
_lowerCAmelCase : Optional[int] = ocr_lang
_lowerCAmelCase : Any = tesseract_config
def a ( self , snake_case__ , snake_case__ , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
_lowerCAmelCase : str = (size['height'], size['width'])
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : Optional[Any] = size if size is not None else self.size
_lowerCAmelCase : int = get_size_dict(snake_case__ )
_lowerCAmelCase : Dict = resample if resample is not None else self.resample
_lowerCAmelCase : Tuple = apply_ocr if apply_ocr is not None else self.apply_ocr
_lowerCAmelCase : Tuple = ocr_lang if ocr_lang is not None else self.ocr_lang
_lowerCAmelCase : List[Any] = tesseract_config if tesseract_config is not None else self.tesseract_config
_lowerCAmelCase : Dict = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
# All transformations expect numpy arrays.
_lowerCAmelCase : int = [to_numpy_array(snake_case__ ) for image in images]
if apply_ocr:
requires_backends(self , 'pytesseract' )
_lowerCAmelCase : int = []
_lowerCAmelCase : Dict = []
for image in images:
_lowerCAmelCase : Optional[int] = apply_tesseract(snake_case__ , snake_case__ , snake_case__ )
words_batch.append(snake_case__ )
boxes_batch.append(snake_case__ )
if do_resize:
_lowerCAmelCase : Union[str, Any] = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_lowerCAmelCase : Dict = [flip_channel_order(snake_case__ ) for image in images]
_lowerCAmelCase : str = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
_lowerCAmelCase : Optional[Any] = BatchFeature(data={'pixel_values': images} , tensor_type=snake_case__ )
if apply_ocr:
_lowerCAmelCase : List[str] = words_batch
_lowerCAmelCase : Optional[int] = boxes_batch
return data
| 358 |
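The helper at the top of the row above normalizes pixel boxes into LayoutLM's 0–1000 coordinate space. A standalone sketch with a worked example:

```python
def normalize_box(box: list[int], width: int, height: int) -> list[int]:
    # scale (x0, y0, x1, y1) pixel coordinates into the 0-1000 grid LayoutLM expects
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

# a 100x200-pixel word box inside a 200x400 image keeps its relative position
assert normalize_box([10, 20, 110, 220], width=200, height=400) == [50, 50, 550, 550]
```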
'''simple docstring'''
lowerCAmelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase : List[str] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 25 | 0 |
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = 0 # The first color of the flag.
lowerCAmelCase : Optional[int] = 1 # The second color of the flag.
lowerCAmelCase : int = 2 # The third color of the flag.
lowerCAmelCase : Any = (red, white, blue)
def lowercase (_A ):
"""simple docstring"""
if not sequence:
return []
if len(_A ) == 1:
return list(_A )
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : List[str] = len(_A ) - 1
_lowerCAmelCase : Optional[Any] = 0
while mid <= high:
if sequence[mid] == colors[0]:
_lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[high], sequence[mid]
high -= 1
else:
_lowerCAmelCase : Optional[int] = f'The elements inside the sequence must contains only {colors} values'
raise ValueError(_A )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : str = input("""Enter numbers separated by commas:\n""").strip()
lowerCAmelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")]
print(F'''{dutch_national_flag_sort(unsorted)}''')
| 359 |
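The row above is a single-pass Dutch national flag sort over the values (0, 1, 2). A readable sketch of the three-pointer invariant; `low`, `mid`, and `high` are reconstructed names for the three obfuscated indices:

```python
def dutch_national_flag_sort(sequence: list[int]) -> list[int]:
    colors = (0, 1, 2)
    low, mid, high = 0, 0, len(sequence) - 1
    while mid <= high:
        if sequence[mid] == colors[0]:      # 0s swap to the front
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:    # 1s stay in the middle
            mid += 1
        elif sequence[mid] == colors[2]:    # 2s swap to the back; mid is re-examined
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            raise ValueError(f"The elements inside the sequence must contains only {colors} values")
    return sequence

assert dutch_national_flag_sort([2, 0, 2, 1, 1, 0]) == [0, 0, 1, 1, 2, 2]
```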
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 25 | 0 |
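The row above is the standard `transformers` lazy-import module: heavy backends load only when first accessed, behind per-backend availability guards. A simplified sketch of the same idea (this is not the real `_LazyModule`; names and structure here are illustrative):

```python
import importlib
from types import ModuleType

class LazyModule(ModuleType):
    """Resolve exported names on first access instead of eagerly at import time."""

    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        # map each exported symbol to the fully qualified module that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, symbol: str):
        try:
            module_name = self._symbol_to_module[symbol]
        except KeyError:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}") from None
        value = getattr(importlib.import_module(module_name), symbol)
        setattr(self, symbol, value)  # cache: later lookups skip __getattr__ entirely
        return value

lazy = LazyModule("lazy_json", {"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))  # json is imported only at this first access
```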
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=False , snake_case__=True , snake_case__=False , snake_case__=True , snake_case__=33 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=4 , snake_case__=None , ):
'''simple docstring'''
_lowerCAmelCase : str = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : Dict = seq_length
_lowerCAmelCase : int = is_training
_lowerCAmelCase : str = use_input_mask
_lowerCAmelCase : Optional[int] = use_token_type_ids
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : List[str] = vocab_size
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : int = attention_probs_dropout_prob
_lowerCAmelCase : str = max_position_embeddings
_lowerCAmelCase : List[Any] = type_vocab_size
_lowerCAmelCase : Optional[Any] = type_sequence_label_size
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : str = num_labels
_lowerCAmelCase : Optional[int] = num_choices
_lowerCAmelCase : Dict = scope
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_input_mask:
_lowerCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a ( self ):
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Any = EsmModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCAmelCase : str = model(snake_case__ , attention_mask=snake_case__ )
_lowerCAmelCase : Union[str, Any] = model(snake_case__ )
_lowerCAmelCase : Any = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = EsmForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCAmelCase : Dict = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : Dict = EsmForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCAmelCase : List[str] = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = False
__magic_name__ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
__magic_name__ = ()
__magic_name__ = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
__magic_name__ = True
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = EsmModelTester(self )
_lowerCAmelCase : Tuple = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def a ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : int = type
self.model_tester.create_and_check_model(*snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def a ( self ):
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Dict = EsmModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()[0]
_lowerCAmelCase : Dict = EsmEmbeddings(config=snake_case__ )
_lowerCAmelCase : List[Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
_lowerCAmelCase : List[str] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
_lowerCAmelCase : Optional[Any] = create_position_ids_from_input_ids(snake_case__ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(snake_case__ , snake_case__ ) ) )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()[0]
_lowerCAmelCase : Any = EsmEmbeddings(config=snake_case__ )
_lowerCAmelCase : Optional[Any] = torch.empty(2 , 4 , 30 )
_lowerCAmelCase : Dict = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
_lowerCAmelCase : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
_lowerCAmelCase : Optional[int] = embeddings.create_position_ids_from_inputs_embeds(snake_case__ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(snake_case__ , snake_case__ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def a ( self ):
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def a ( self ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def a ( self ):
'''simple docstring'''
pass
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
@slow
def a ( self ):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase : Optional[int] = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
_lowerCAmelCase : str = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_lowerCAmelCase : Tuple = model(snake_case__ )[0]
_lowerCAmelCase : int = 33
_lowerCAmelCase : Dict = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , snake_case__ )
_lowerCAmelCase : Optional[int] = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def a ( self ):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase : List[str] = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
_lowerCAmelCase : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_lowerCAmelCase : Dict = model(snake_case__ )[0]
# compare the actual values for a slice.
_lowerCAmelCase : str = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
| 360 |
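Two of the tests above exercise ESM's padding-aware position ids. The core trick, positions counting only non-padding tokens while padding keeps `padding_idx`, fits in a few lines; this sketch mirrors `create_position_ids_from_input_ids` as exercised by the test, not the full `EsmEmbeddings` module:

```python
import torch

def create_position_ids_from_input_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()
    # cumulative count of real tokens, zeroed at padding positions
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    # offset past padding_idx so no real token collides with the padding position
    return incremental_indices.long() + padding_idx

padding_idx = 1  # EsmConfig above sets pad_token_id=1
input_ids = torch.as_tensor([[12, 31, 13, padding_idx]])
print(create_position_ids_from_input_ids(input_ids, padding_idx))
# tensor([[2, 3, 4, 1]]) -- matches the test's expected_positions
```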
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "nat"
__magic_name__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : Any = depths
_lowerCAmelCase : Dict = len(snake_case__ )
_lowerCAmelCase : str = num_heads
_lowerCAmelCase : Dict = kernel_size
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : int = qkv_bias
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Dict = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
_lowerCAmelCase : Any = layer_scale_init_value
_lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )]
_lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices(
out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
| 25 | 0 |
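The NAT config above derives its final `hidden_size` by doubling `embed_dim` once per stage after the first. A quick check with the defaults from the signature (`embed_dim=64`, four stages):

```python
embed_dim, depths = 64, [3, 4, 6, 5]  # NatConfig defaults above
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 512 -- channel dimension after the last stage

stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
print(stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
```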
def lowercase (_A , _A ):
"""simple docstring"""
return 1 if input_a == input_a else 0
def lowercase ():
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 361 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : str = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""roberta-base""": 5_12,
"""roberta-large""": 5_12,
"""roberta-large-mnli""": 5_12,
"""distilroberta-base""": 5_12,
"""roberta-base-openai-detector""": 5_12,
"""roberta-large-openai-detector""": 5_12,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = RobertaTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) )
_lowerCAmelCase : List[Any] = add_prefix_space
_lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = add_prefix_space
_lowerCAmelCase : Union[str, Any] = 'post_processor'
_lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
_lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCAmelCase : Any = tuple(state['sep'] )
if "cls" in state:
_lowerCAmelCase : str = tuple(state['cls'] )
_lowerCAmelCase : List[str] = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : int = add_prefix_space
_lowerCAmelCase : Tuple = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
_lowerCAmelCase : Union[str, Any] = trim_offsets
_lowerCAmelCase : Optional[int] = True
if changes_to_apply:
_lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) )
_lowerCAmelCase : Optional[int] = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
def a ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
_lowerCAmelCase : Tuple = value
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def a ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 25 | 0 |
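The row above is `RobertaTokenizerFast`. A minimal usage sketch showing the special-token layout its `build_inputs_with_special_tokens` produces (`<s> A </s>` for one sequence, `<s> A </s></s> B </s>` for a pair):

```python
from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")

single = tokenizer("Hello world")
pair = tokenizer("Hello world", "Second segment")

print(tokenizer.convert_ids_to_tokens(single["input_ids"]))  # <s> ... </s>
print(tokenizer.convert_ids_to_tokens(pair["input_ids"]))    # <s> A </s></s> B </s>
# create_token_type_ids_from_sequences returns all zeros: RoBERTa ignores segment ids
```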
'''simple docstring'''
import requests
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = {'Content-Type': 'application/json'}
_lowerCAmelCase : List[str] = requests.post(_A , json={'text': message_body} , headers=_A )
if response.status_code != 2_0_0:
_lowerCAmelCase : Any = (
'Request to slack returned an error '
f'{response.status_code}, the response is:\n{response.text}'
)
raise ValueError(_A )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 362 |
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = 0 # The first color of the flag.
lowerCAmelCase : Optional[int] = 1 # The second color of the flag.
lowerCAmelCase : int = 2 # The third color of the flag.
lowerCAmelCase : Any = (red, white, blue)
def lowercase (_A ):
"""simple docstring"""
if not sequence:
return []
if len(_A ) == 1:
return list(_A )
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : List[str] = len(_A ) - 1
_lowerCAmelCase : Optional[Any] = 0
while mid <= high:
if sequence[mid] == colors[0]:
_lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[high], sequence[mid]
high -= 1
else:
_lowerCAmelCase : Optional[int] = f'The elements inside the sequence must contains only {colors} values'
raise ValueError(_A )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : str = input("""Enter numbers separated by commas:\n""").strip()
lowerCAmelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")]
print(F'''{dutch_national_flag_sort(unsorted)}''')
| 25 | 0 |
'''simple docstring'''
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
lowerCAmelCase : int = get_logger()
lowerCAmelCase : Optional[dict] = None
class UpperCamelCase__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
super().__init__(features=snake_case__ )
import jax
from jaxlib.xla_client import Device
if isinstance(snake_case__ , snake_case__ ):
raise ValueError(
F'Expected {device} to be a `str` not {type(snake_case__ )}, as `jaxlib.xla_extension.Device` '
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
_lowerCAmelCase : Tuple = device if isinstance(snake_case__ , snake_case__ ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : List[str] = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'Device with string identifier {self.device} not listed among the available '
F'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
F'device: {str(jax.devices()[0] )}.' )
_lowerCAmelCase : Tuple = str(jax.devices()[0] )
_lowerCAmelCase : Optional[int] = jnp_array_kwargs
@staticmethod
def a ( ):
'''simple docstring'''
import jax
return {str(snake_case__ ): device for device in jax.devices()}
def a ( self , snake_case__ ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(snake_case__ , snake_case__ ) and column:
if all(
isinstance(snake_case__ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(snake_case__ , axis=0 )
return column
def a ( self , snake_case__ ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(snake_case__ , (str, bytes, type(snake_case__ )) ):
return value
elif isinstance(snake_case__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_lowerCAmelCase : Tuple = {}
if isinstance(snake_case__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_lowerCAmelCase : Union[str, Any] = {'dtype': jnp.intaa}
else:
_lowerCAmelCase : Optional[Any] = {'dtype': jnp.intaa}
elif isinstance(snake_case__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_lowerCAmelCase : Optional[Any] = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(snake_case__ , PIL.Image.Image ):
_lowerCAmelCase : List[Any] = np.asarray(snake_case__ )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Union[str, Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(snake_case__ , **{**default_dtype, **self.jnp_array_kwargs} )
def a ( self , snake_case__ ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(snake_case__ , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(snake_case__ , '__array__' ) and not isinstance(snake_case__ , jax.Array ):
_lowerCAmelCase : Dict = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(snake_case__ , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(snake_case__ ) for substruct in data_struct] )
elif isinstance(snake_case__ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(snake_case__ ) for substruct in data_struct] )
return self._tensorize(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
return map_nested(self._recursive_tensorize , snake_case__ , map_list=snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(snake_case__ )
_lowerCAmelCase : List[str] = self.python_features_decoder.decode_row(snake_case__ )
return self.recursive_tensorize(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = self.numpy_arrow_extractor().extract_column(snake_case__ )
_lowerCAmelCase : List[Any] = self.python_features_decoder.decode_column(snake_case__ , pa_table.column_names[0] )
_lowerCAmelCase : int = self.recursive_tensorize(snake_case__ )
_lowerCAmelCase : Dict = self._consolidate(snake_case__ )
return column
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.numpy_arrow_extractor().extract_batch(snake_case__ )
_lowerCAmelCase : Union[str, Any] = self.python_features_decoder.decode_batch(snake_case__ )
_lowerCAmelCase : int = self.recursive_tensorize(snake_case__ )
for column_name in batch:
_lowerCAmelCase : List[Any] = self._consolidate(batch[column_name] )
return batch
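# --- Usage sketch (hedged): the class above mirrors the `datasets` JAX formatter, which
# maps Arrow rows/columns/batches to jax.numpy arrays on a chosen device. A minimal sketch
# assuming the public `datasets` API; the dataset contents below are illustrative only.
#
# from datasets import Dataset
# ds = Dataset.from_dict({'x': [[1, 2], [3, 4]]})
# ds = ds.with_format('jax')   # routes through a formatter like the one above
# batch = ds[:2]               # {'x': jnp.ndarray of shape (2, 2)}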
| 363 |
'''simple docstring'''
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Optional[int] = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
_lowerCAmelCase : int = 6
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : Optional[int] = 1_9_0_1
_lowerCAmelCase : Optional[Any] = 0
while year < 2_0_0_1:
day += 7
if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
_lowerCAmelCase : List[str] = day - days_per_month[month - 2]
elif day > 2_9 and month == 2:
month += 1
_lowerCAmelCase : List[str] = day - 2_9
else:
if day > days_per_month[month - 1]:
month += 1
_lowerCAmelCase : List[str] = day - days_per_month[month - 2]
if month > 1_2:
year += 1
_lowerCAmelCase : Optional[int] = 1
if year < 2_0_0_1 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
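# Cross-check sketch (hedged): an independent count of the same quantity via the standard
# library; `datetime` is an assumption for verification, not part of the original solution.
# import datetime
# assert solution() == sum(
#     datetime.date(y, m, 1).weekday() == 6  # 6 == Sunday
#     for y in range(1901, 2001)
#     for m in range(1, 13)
# )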
| 25 | 0 |
'''simple docstring'''
lowerCAmelCase : Optional[int] = [
9_99,
8_00,
7_99,
6_00,
5_99,
5_00,
4_00,
3_99,
3_77,
3_55,
3_33,
3_11,
2_88,
2_66,
2_44,
2_22,
2_00,
1_99,
1_77,
1_55,
1_33,
1_11,
88,
66,
44,
22,
0,
]
lowerCAmelCase : Tuple = [
9_99,
9_76,
9_52,
9_28,
9_05,
8_82,
8_58,
8_57,
8_10,
7_62,
7_15,
7_14,
5_72,
4_29,
4_28,
2_86,
2_85,
2_38,
1_90,
1_43,
1_42,
1_18,
95,
71,
47,
24,
0,
]
lowerCAmelCase : Any = [
9_99,
9_88,
9_77,
9_66,
9_55,
9_44,
9_33,
9_22,
9_11,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_50,
3_00,
2_99,
2_66,
2_33,
2_00,
1_99,
1_79,
1_59,
1_40,
1_20,
1_00,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
lowerCAmelCase : List[Any] = [
9_99,
9_95,
9_92,
9_89,
9_85,
9_81,
9_78,
9_75,
9_71,
9_67,
9_64,
9_61,
9_57,
9_56,
9_51,
9_47,
9_42,
9_37,
9_33,
9_28,
9_23,
9_19,
9_14,
9_13,
9_08,
9_03,
8_97,
8_92,
8_87,
8_81,
8_76,
8_71,
8_70,
8_64,
8_58,
8_52,
8_46,
8_40,
8_34,
8_28,
8_27,
8_20,
8_13,
8_06,
7_99,
7_92,
7_85,
7_84,
7_77,
7_70,
7_63,
7_56,
7_49,
7_42,
7_41,
7_33,
7_24,
7_16,
7_07,
6_99,
6_98,
6_88,
6_77,
6_66,
6_56,
6_55,
6_45,
6_34,
6_23,
6_13,
6_12,
5_98,
5_84,
5_70,
5_69,
5_55,
5_41,
5_27,
5_26,
5_05,
4_84,
4_83,
4_62,
4_40,
4_39,
3_96,
3_95,
3_52,
3_51,
3_08,
3_07,
2_64,
2_63,
2_20,
2_19,
1_76,
1_32,
88,
44,
0,
]
lowerCAmelCase : Any = [
9_99,
9_97,
9_95,
9_92,
9_90,
9_88,
9_86,
9_84,
9_81,
9_79,
9_77,
9_75,
9_72,
9_70,
9_68,
9_66,
9_64,
9_61,
9_59,
9_57,
9_56,
9_54,
9_51,
9_49,
9_46,
9_44,
9_41,
9_39,
9_36,
9_34,
9_31,
9_29,
9_26,
9_24,
9_21,
9_19,
9_16,
9_14,
9_13,
9_10,
9_07,
9_05,
9_02,
8_99,
8_96,
8_93,
8_91,
8_88,
8_85,
8_82,
8_79,
8_77,
8_74,
8_71,
8_70,
8_67,
8_64,
8_61,
8_58,
8_55,
8_52,
8_49,
8_46,
8_43,
8_40,
8_37,
8_34,
8_31,
8_28,
8_27,
8_24,
8_21,
8_17,
8_14,
8_11,
8_08,
8_04,
8_01,
7_98,
7_95,
7_91,
7_88,
7_85,
7_84,
7_80,
7_77,
7_74,
7_70,
7_66,
7_63,
7_60,
7_56,
7_52,
7_49,
7_46,
7_42,
7_41,
7_37,
7_33,
7_30,
7_26,
7_22,
7_18,
7_14,
7_10,
7_07,
7_03,
6_99,
6_98,
6_94,
6_90,
6_85,
6_81,
6_77,
6_73,
6_69,
6_64,
6_60,
6_56,
6_55,
6_50,
6_46,
6_41,
6_36,
6_32,
6_27,
6_22,
6_18,
6_13,
6_12,
6_07,
6_02,
5_96,
5_91,
5_86,
5_80,
5_75,
5_70,
5_69,
5_63,
5_57,
5_51,
5_45,
5_39,
5_33,
5_27,
5_26,
5_19,
5_12,
5_05,
4_98,
4_91,
4_84,
4_83,
4_74,
4_66,
4_57,
4_49,
4_40,
4_39,
4_28,
4_18,
4_07,
3_96,
3_95,
3_81,
3_66,
3_52,
3_51,
3_30,
3_08,
3_07,
2_86,
2_64,
2_63,
2_42,
2_20,
2_19,
1_76,
1_75,
1_32,
1_31,
88,
44,
0,
]
lowerCAmelCase : List[str] = [
9_99,
9_91,
9_82,
9_74,
9_66,
9_58,
9_50,
9_41,
9_33,
9_25,
9_16,
9_08,
9_00,
8_99,
8_74,
8_50,
8_25,
8_00,
7_99,
7_00,
6_00,
5_00,
4_00,
3_00,
2_00,
1_00,
0,
]
lowerCAmelCase : Union[str, Any] = [
9_99,
9_92,
9_85,
9_78,
9_71,
9_64,
9_57,
9_49,
9_42,
9_35,
9_28,
9_21,
9_14,
9_07,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_00,
2_99,
2_00,
1_99,
1_00,
99,
0,
]
lowerCAmelCase : Any = [
9_99,
9_96,
9_92,
9_89,
9_85,
9_82,
9_79,
9_75,
9_72,
9_68,
9_65,
9_61,
9_58,
9_55,
9_51,
9_48,
9_44,
9_41,
9_38,
9_34,
9_31,
9_27,
9_24,
9_20,
9_17,
9_14,
9_10,
9_07,
9_03,
9_00,
8_99,
8_91,
8_84,
8_76,
8_69,
8_61,
8_53,
8_46,
8_38,
8_30,
8_23,
8_15,
8_08,
8_00,
7_99,
7_88,
7_77,
7_66,
7_55,
7_44,
7_33,
7_22,
7_11,
7_00,
6_99,
6_88,
6_77,
6_66,
6_55,
6_44,
6_33,
6_22,
6_11,
6_00,
5_99,
5_85,
5_71,
5_57,
5_42,
5_28,
5_14,
5_00,
4_99,
4_85,
4_71,
4_57,
4_42,
4_28,
4_14,
4_00,
3_99,
3_79,
3_59,
3_40,
3_20,
3_00,
2_99,
2_79,
2_59,
2_40,
2_20,
2_00,
1_99,
1_66,
1_33,
1_00,
99,
66,
33,
0,
]
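# Note (hedged): the constants above read as precomputed diffusion-sampler timestep
# schedules -- descending values in [0, 999] at varying densities (uniform, piecewise,
# front-loaded). This is inferred from their shape, not documented in this file.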
| 364 |
'''simple docstring'''
def lowercase (_A = 1_0_0_0_0_0_0 ):
"""simple docstring"""
_lowerCAmelCase : Any = set(range(3 , _A , 2 ) )
primes.add(2 )
for p in range(3 , _A , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _A , _A ) ) )
_lowerCAmelCase : Union[str, Any] = [float(_A ) for n in range(limit + 1 )]
for p in primes:
for n in range(_A , limit + 1 , _A ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
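# Note (hedged): this is a sieve computing Euler's totient phi(n) for all n <= limit and
# returning sum(phi(2..limit)), i.e. the count of reduced proper fractions with d <= limit
# (Project Euler 72 for limit = 1_000_000). For limit = 10 the exact sum is 31.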
| 25 | 0 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Tuple = {
"""vocab_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
},
"""merges_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""Salesforce/codegen-350M-mono""": (
"""https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : int = {
"""Salesforce/codegen-350M-mono""": 20_48,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = CodeGenTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="<|endoftext|>" , snake_case__="<|endoftext|>" , snake_case__="<|endoftext|>" , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , add_prefix_space=snake_case__ , **snake_case__ , )
if kwargs.pop('add_bos_token' , snake_case__ ):
_lowerCAmelCase : int = kwargs.pop('name_or_path' , '' )
raise ValueError(
                'Currently GPT2\'s fast tokenizer does NOT support adding a BOS token. '
                'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'
F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'
' so that the fast tokenizer works correctly.' )
_lowerCAmelCase : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : Union[str, Any] = getattr(snake_case__ , pre_tok_state.pop('type' ) )
_lowerCAmelCase : Any = add_prefix_space
_lowerCAmelCase : List[Any] = pre_tok_class(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = add_prefix_space
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Any = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : Any = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def a ( self , snake_case__ , snake_case__ = False , snake_case__ = None , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = super().decode(
token_ids=snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ , **snake_case__ , )
if truncate_before_pattern is not None and len(snake_case__ ) > 0:
_lowerCAmelCase : Tuple = self.truncate(snake_case__ , snake_case__ )
return decoded_text
def a ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
def find_re(snake_case__ , snake_case__ , snake_case__ ):
_lowerCAmelCase : Any = pattern.search(snake_case__ , snake_case__ )
return m.start() if m else -1
_lowerCAmelCase : Any = [re.compile(snake_case__ , re.MULTILINE ) for pattern in truncate_before_pattern]
_lowerCAmelCase : Union[str, Any] = list(re.finditer('^print' , snake_case__ , re.MULTILINE ) )
if len(snake_case__ ) > 1:
_lowerCAmelCase : Optional[int] = completion[: prints[1].start()]
_lowerCAmelCase : int = list(re.finditer('^def' , snake_case__ , re.MULTILINE ) )
if len(snake_case__ ) > 1:
_lowerCAmelCase : Dict = completion[: defs[1].start()]
_lowerCAmelCase : Any = 0
_lowerCAmelCase : int = [
pos for pos in [find_re(snake_case__ , snake_case__ , snake_case__ ) for terminal in terminals] if pos != -1
]
if len(snake_case__ ) > 0:
return completion[: min(snake_case__ )]
else:
return completion
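# Usage sketch (hedged): the fast tokenizer above extends `decode` with a
# `truncate_before_pattern` option that cuts generated code at the earliest regex match.
# The model id comes from this file's constants; `ids` is an illustrative placeholder.
#
# from transformers import AutoTokenizer
# tok = AutoTokenizer.from_pretrained('Salesforce/codegen-350M-mono')
# text = tok.decode(ids, truncate_before_pattern=['\n\n\n', r'^def ', r'^class '])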
| 365 |
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase : Tuple = """src/transformers"""
# Pattern that looks at the indentation in a line.
lowerCAmelCase : str = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""")
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : int = _re_indent.search(_A )
return "" if search is None else search.groups()[0]
def lowercase (_A , _A="" , _A=None , _A=None ):
"""simple docstring"""
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_A ):
index += 1
_lowerCAmelCase : Dict = ['\n'.join(lines[:index] )]
else:
_lowerCAmelCase : str = []
    # We split into blocks until we get to the `end_prompt` (or the end of the file).
_lowerCAmelCase : List[Any] = [lines[index]]
index += 1
while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_A ) )
if index < len(_A ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Union[str, Any] = []
else:
blocks.append('\n'.join(_A ) )
_lowerCAmelCase : List[str] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_A ) > 0:
blocks.append('\n'.join(_A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_A ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def lowercase (_A ):
"""simple docstring"""
def _inner(_A ):
return key(_A ).lower().replace('_' , '' )
return _inner
def lowercase (_A , _A=None ):
"""simple docstring"""
def noop(_A ):
return x
if key is None:
_lowerCAmelCase : List[Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()]
_lowerCAmelCase : Dict = ignore_underscore(_A )
return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A )
def lowercase (_A ):
"""simple docstring"""
def _replace(_A ):
_lowerCAmelCase : Dict = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]"
_lowerCAmelCase : Tuple = import_statement.split('\n' )
if len(_A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
_lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCAmelCase : Dict = sort_objects(_A , key=lambda _A : x[1] )
_lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : List[str] = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] )
return "\n".join(_A )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A )
return import_statement
def lowercase (_A , _A=True ):
"""simple docstring"""
with open(_A , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Tuple = split_code_in_indented_blocks(
_A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_A ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : Tuple = main_blocks[block_idx]
_lowerCAmelCase : int = block.split('\n' )
# Get to the start of the imports.
_lowerCAmelCase : Tuple = 0
while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Dict = len(_A )
else:
line_idx += 1
if line_idx >= len(_A ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] )
_lowerCAmelCase : Tuple = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
_lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None]
_lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda _A : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : int = 0
_lowerCAmelCase : Optional[Any] = []
for i in range(len(_A ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(_A )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(_A ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(_A , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(_A ) )
def lowercase (_A=True ):
"""simple docstring"""
_lowerCAmelCase : int = []
for root, _, files in os.walk(_A ):
if "__init__.py" in files:
_lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A )
if result:
_lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )]
if len(_A ) > 0:
raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' )
if __name__ == "__main__":
lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowerCAmelCase : List[str] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
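# Usage sketch (hedged): this mirrors transformers' `utils/custom_init_isort.py`.
# Illustrative invocations (the flag matches the argparse definition above):
#   python custom_init_isort.py               # sort _import_structure blocks in place
#   python custom_init_isort.py --check_only  # raise if any __init__.py would change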
| 25 | 0 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = {
"""7B""": 1_10_08,
"""13B""": 1_38_24,
"""30B""": 1_79_20,
"""65B""": 2_20_16,
"""70B""": 2_86_72,
}
lowerCAmelCase : Optional[int] = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def lowercase (_A , _A=1 , _A=2_5_6 ):
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def lowercase (_A ):
"""simple docstring"""
with open(_A , 'r' ) as f:
return json.load(_A )
def lowercase (_A , _A ):
"""simple docstring"""
with open(_A , 'w' ) as f:
json.dump(_A , _A )
def lowercase (_A , _A , _A , _A=True ):
"""simple docstring"""
os.makedirs(_A , exist_ok=_A )
_lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' )
os.makedirs(_A , exist_ok=_A )
_lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) )
_lowerCAmelCase : List[str] = NUM_SHARDS[model_size]
_lowerCAmelCase : str = params['n_layers']
_lowerCAmelCase : Optional[int] = params['n_heads']
_lowerCAmelCase : int = n_heads // num_shards
_lowerCAmelCase : Optional[int] = params['dim']
_lowerCAmelCase : Union[str, Any] = dim // n_heads
_lowerCAmelCase : Union[str, Any] = 1_0_0_0_0.0
_lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA
_lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads
_lowerCAmelCase : Optional[int] = dim // num_key_value_heads
else: # compatibility with other checkpoints
_lowerCAmelCase : Union[str, Any] = n_heads
_lowerCAmelCase : Any = n_heads_per_shard
_lowerCAmelCase : Optional[Any] = dim
# permute for sliced rotary
def permute(_A , _A=n_heads , _A=dim , _A=dim ):
return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A )
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
_lowerCAmelCase : List[Any] = [
torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' )
for i in range(_A )
]
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Union[str, Any] = {'weight_map': {}}
for layer_i in range(_A ):
_lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object; saving attention_norm and ffn_norm would save other weights too, which is
            # redundant as those weights will be stitched from multiple shards. To avoid that, they are cloned.
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
_lowerCAmelCase : List[str] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) )
_lowerCAmelCase : Optional[int] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , )
_lowerCAmelCase : Dict = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A )
_lowerCAmelCase : Dict = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : Tuple = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : int = inv_freq
for k, v in state_dict.items():
_lowerCAmelCase : Optional[Any] = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
_lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : List[str] = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
_lowerCAmelCase : List[str] = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ),
}
for k, v in state_dict.items():
_lowerCAmelCase : int = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
# Write configs
_lowerCAmelCase : Tuple = {'total_size': param_count * 2}
write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) )
_lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
_lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6
_lowerCAmelCase : List[Any] = LlamaConfig(
hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , )
config.save_pretrained(_A )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
    _lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.float16 , low_cpu_mem_usage=_A )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(_A , safe_serialization=_A )
shutil.rmtree(_A )
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
_lowerCAmelCase : List[Any] = tokenizer_class(_A )
tokenizer.save_pretrained(_A )
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' )
_lowerCAmelCase : Any = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
_lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' )
write_tokenizer(args.output_dir , _A )
if __name__ == "__main__":
main()
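# Usage sketch (hedged: the script filename is illustrative, the flags are the argparse
# definitions above, and the paths are placeholders):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/llama --model_size 7B --output_dir /path/to/hf_model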
| 366 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = KandinskyVaaInpaintPipeline
__magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
__magic_name__ = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
__magic_name__ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__magic_name__ = False
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return 32
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def a ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a ( self ):
'''simple docstring'''
return 100
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = {
'in_channels': 9,
            # Out channels is double the in channels because the model predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(**snake_case__ )
return model
@property
def a ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.dummy_unet
_lowerCAmelCase : List[Any] = self.dummy_movq
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='epsilon' , thresholding=snake_case__ , )
_lowerCAmelCase : Any = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a ( self , snake_case__ , snake_case__=0 ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
_lowerCAmelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create init_image
_lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCAmelCase : Union[str, Any] = Image.fromarray(np.uint8(snake_case__ ) ).convert('RGB' ).resize((256, 256) )
# create mask
        _lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.float32 )
_lowerCAmelCase : Dict = 0
if str(snake_case__ ).startswith('mps' ):
_lowerCAmelCase : Optional[Any] = torch.manual_seed(snake_case__ )
else:
_lowerCAmelCase : List[Any] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_lowerCAmelCase : Optional[int] = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = 'cpu'
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ )
_lowerCAmelCase : Optional[int] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(snake_case__ ) )
_lowerCAmelCase : int = output.images
_lowerCAmelCase : int = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : List[str] = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def a ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
_lowerCAmelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        _lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.float32 )
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[str] = 'a hat'
_lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
pipe_prior.to(snake_case__ )
_lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.float16 )
_lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCAmelCase : Optional[Any] = pipeline(
image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
_lowerCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
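# Run sketch (hedged): the fast test above uses dummy weights on CPU; the slow test needs
# a GPU and downloads real checkpoints. Illustrative invocation (the path is a guess):
#   RUN_SLOW=1 pytest -k "KandinskyVaaInpaint" tests/pipelines/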
| 25 | 0 |
'''simple docstring'''
def lowercase (_A = 1_0_0_0_0_0_0 ):
"""simple docstring"""
_lowerCAmelCase : Any = set(range(3 , _A , 2 ) )
primes.add(2 )
for p in range(3 , _A , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _A , _A ) ) )
_lowerCAmelCase : Union[str, Any] = [float(_A ) for n in range(limit + 1 )]
for p in primes:
for n in range(_A , limit + 1 , _A ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 367 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def lowercase (_A ):
"""simple docstring"""
if not postfix_notation:
return 0
_lowerCAmelCase : int = {'+', '-', '*', '/'}
_lowerCAmelCase : list[Any] = []
for token in postfix_notation:
if token in operations:
_lowerCAmelCase , _lowerCAmelCase : Tuple = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(_A ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
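# Example sketch (hedged): "2 3 + 4 *" evaluates to (2 + 3) * 4 = 20; commutative
# operators are chosen since the operand pop order is not recoverable from this file.
# print(lowercase(['2', '3', '+', '4', '*']))  # 20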
| 25 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCAmelCase : Optional[Any] = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def lowercase (_A , _A , _A=None , _A=None , _A=None , _A=None , _A=None , _A=None , ):
"""simple docstring"""
if attention_mask is None:
_lowerCAmelCase : Tuple = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_lowerCAmelCase : List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_lowerCAmelCase : int = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowerCAmelCase : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowerCAmelCase : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=False , snake_case__=99 , snake_case__=16 , snake_case__=2 , snake_case__=4 , snake_case__=4 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=32 , snake_case__=2 , snake_case__=1 , snake_case__=0 , snake_case__=0.02 , ):
'''simple docstring'''
_lowerCAmelCase : Tuple = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : Union[str, Any] = seq_length
_lowerCAmelCase : Optional[Any] = is_training
_lowerCAmelCase : Union[str, Any] = use_labels
_lowerCAmelCase : Tuple = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : Optional[int] = intermediate_size
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : str = eos_token_id
_lowerCAmelCase : List[Any] = pad_token_id
_lowerCAmelCase : str = bos_token_id
_lowerCAmelCase : str = initializer_range
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        _lowerCAmelCase : int = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
_lowerCAmelCase : Optional[Any] = shift_tokens_right(snake_case__ , 1 , 2 )
_lowerCAmelCase : Dict = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=snake_case__ , )
_lowerCAmelCase : List[Any] = prepare_blenderbot_inputs_dict(snake_case__ , snake_case__ , snake_case__ )
return config, inputs_dict
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def a ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : int = 20
_lowerCAmelCase : Dict = model_class_name(snake_case__ )
_lowerCAmelCase : Tuple = model.encode(inputs_dict['input_ids'] )
_lowerCAmelCase : Optional[int] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowerCAmelCase : List[str] = model.init_cache(decoder_input_ids.shape[0] , snake_case__ , snake_case__ )
_lowerCAmelCase : str = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
_lowerCAmelCase : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCAmelCase : Any = model.decode(
decoder_input_ids[:, :-1] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=snake_case__ , decoder_position_ids=snake_case__ , )
_lowerCAmelCase : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowerCAmelCase : str = model.decode(
decoder_input_ids[:, -1:] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=snake_case__ , )
_lowerCAmelCase : Optional[int] = model.decode(snake_case__ , snake_case__ )
_lowerCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
def a ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 20
_lowerCAmelCase : List[Any] = model_class_name(snake_case__ )
_lowerCAmelCase : Tuple = model.encode(inputs_dict['input_ids'] )
_lowerCAmelCase : List[str] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowerCAmelCase : str = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowerCAmelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , snake_case__ , snake_case__ )
_lowerCAmelCase : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCAmelCase : List[str] = model.decode(
decoder_input_ids[:, :-1] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=snake_case__ , decoder_position_ids=snake_case__ , )
_lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowerCAmelCase : int = model.decode(
decoder_input_ids[:, -1:] , snake_case__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=snake_case__ , decoder_position_ids=snake_case__ , )
_lowerCAmelCase : Tuple = model.decode(snake_case__ , snake_case__ , decoder_attention_mask=snake_case__ )
_lowerCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ = 9_9
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
_lowerCAmelCase : Optional[Any] = input_ids.shape[0]
_lowerCAmelCase : Tuple = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self._get_config_and_data()
_lowerCAmelCase : Any = FlaxBlenderbotForConditionalGeneration(snake_case__ )
_lowerCAmelCase : Union[str, Any] = lm_model(input_ids=snake_case__ )
_lowerCAmelCase : Any = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_lowerCAmelCase : List[Any] = FlaxBlenderbotForConditionalGeneration(snake_case__ )
        _lowerCAmelCase : Dict = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        _lowerCAmelCase : Any = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
_lowerCAmelCase : Tuple = lm_model(input_ids=snake_case__ , decoder_input_ids=snake_case__ )
_lowerCAmelCase : Any = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , snake_case__ )
def a ( self ):
'''simple docstring'''
        _lowerCAmelCase : Dict = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
_lowerCAmelCase : Optional[int] = shift_tokens_right(snake_case__ , 1 , 2 )
        _lowerCAmelCase : Optional[int] = np.equal(snake_case__ , 1 ).astype(np.float32 ).sum()
        _lowerCAmelCase : Any = np.equal(snake_case__ , 1 ).astype(np.float32 ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(snake_case__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = True
__magic_name__ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
__magic_name__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = FlaxBlenderbotModelTester(self )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(snake_case__ , snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(snake_case__ , snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase : Union[str, Any] = self._prepare_for_class(snake_case__ , snake_case__ )
_lowerCAmelCase : Union[str, Any] = model_class(snake_case__ )
@jax.jit
def encode_jitted(snake_case__ , snake_case__=None , **snake_case__ ):
return model.encode(input_ids=snake_case__ , attention_mask=snake_case__ )
with self.subTest('JIT Enabled' ):
_lowerCAmelCase : Union[str, Any] = encode_jitted(**snake_case__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowerCAmelCase : str = encode_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase : Any = model_class(snake_case__ )
_lowerCAmelCase : Tuple = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
_lowerCAmelCase : Union[str, Any] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(snake_case__ , snake_case__ , snake_case__ ):
return model.decode(
decoder_input_ids=snake_case__ , decoder_attention_mask=snake_case__ , encoder_outputs=snake_case__ , )
with self.subTest('JIT Enabled' ):
_lowerCAmelCase : Union[str, Any] = decode_jitted(**snake_case__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowerCAmelCase : int = decode_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ , snake_case__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def a ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowerCAmelCase : Dict = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_lowerCAmelCase : str = np.ones((1, 1) ) * model.config.eos_token_id
_lowerCAmelCase : Any = model(snake_case__ )
self.assertIsNotNone(snake_case__ )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
_lowerCAmelCase : Tuple = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
_lowerCAmelCase : Dict = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=snake_case__ )
_lowerCAmelCase : Dict = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
_lowerCAmelCase : List[Any] = ['Sam']
_lowerCAmelCase : str = tokenizer(snake_case__ , return_tensors='jax' )
_lowerCAmelCase : Optional[Any] = model.generate(**snake_case__ , **snake_case__ )
_lowerCAmelCase : Any = 'Sam is a great name. It means "sun" in Gaelic.'
_lowerCAmelCase : str = tokenizer.batch_decode(snake_case__ , **snake_case__ )
assert generated_txt[0].strip() == tgt_text
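# Behavior sketch (hedged): `shift_tokens_right(ids, pad_token_id, decoder_start_token_id)`
# rolls each row right by one and writes the start token at position 0, e.g. with pad=1
# and start=2 (illustrative values): [[71, 82, 18, 2]] -> [[2, 71, 82, 18]].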
| 368 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "mobilenet_v2"
def __init__( self , snake_case__=3 , snake_case__=224 , snake_case__=1.0 , snake_case__=8 , snake_case__=8 , snake_case__=6 , snake_case__=32 , snake_case__=True , snake_case__=True , snake_case__="relu6" , snake_case__=True , snake_case__=0.8 , snake_case__=0.02 , snake_case__=0.001 , snake_case__=255 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : List[Any] = depth_multiplier
_lowerCAmelCase : List[Any] = depth_divisible_by
_lowerCAmelCase : Optional[Any] = min_depth
_lowerCAmelCase : str = expand_ratio
_lowerCAmelCase : str = output_stride
_lowerCAmelCase : Any = first_layer_is_expansion
_lowerCAmelCase : int = finegrained_output
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : List[str] = tf_padding
_lowerCAmelCase : Optional[int] = classifier_dropout_prob
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : str = semantic_loss_ignore_index
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = version.parse("1.11" )
@property
def a ( self ):
'''simple docstring'''
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def a ( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def a ( self ):
'''simple docstring'''
return 1E-4
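# Usage sketch (hedged): the classes above correspond to MobileNetV2Config and its ONNX
# config in transformers; the public names are assumptions given the obfuscated ones here.
# from transformers import MobileNetV2Config
# cfg = MobileNetV2Config(depth_multiplier=1.4, image_size=224)  # ~ mobilenet_v2_1.4_224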
| 25 | 0 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowerCAmelCase : Optional[Any] = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
lowerCAmelCase : Any = {
"""facebook/blenderbot_small-90M""": 5_12,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = BlenderbotSmallTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<|endoftext|>" , snake_case__="<|endoftext|>" , snake_case__="<|endoftext|>" , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=snake_case__ , merges=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , ) , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , **snake_case__ , )
_lowerCAmelCase : Optional[int] = add_prefix_space
def a ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : List[str] = [self.sep_token_id]
_lowerCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 369 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase : Optional[int] = 'pt'
_lowerCAmelCase : Tuple = 'tf'
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ )
model_tf.save_pretrained(snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = 'mock_framework'
# Framework provided - return whatever the user provides
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(snake_case__ )
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(snake_case__ )
_lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
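        # PyTorch checkpoint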
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(snake_case__ )
_lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ )
self.assertEqual(snake_case__ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(snake_case__ )
_lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ )
self.assertEqual(snake_case__ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(snake_case__ ):
_lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ )
def a ( self ):
'''simple docstring'''
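        # Framework not provided, TensorFlow not in environment -> use PyTorch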
_lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ):
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase : Any = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_torch_available' , snake_case__ ):
_lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase : int = MagicMock(return_value=snake_case__ )
_lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch(
'transformers.onnx.features.is_torch_available' , snake_case__ ):
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase : str = MagicMock(return_value=snake_case__ )
_lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch(
'transformers.onnx.features.is_torch_available' , snake_case__ ):
with self.assertRaises(snake_case__ ):
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
| 25 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Any = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "luke"
def __init__( self , snake_case__=5_0267 , snake_case__=50_0000 , snake_case__=768 , snake_case__=256 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=True , snake_case__=None , snake_case__=1 , snake_case__=0 , snake_case__=2 , **snake_case__ , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
_lowerCAmelCase : Union[str, Any] = vocab_size
_lowerCAmelCase : Any = entity_vocab_size
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = entity_emb_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Optional[int] = intermediate_size
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : Tuple = max_position_embeddings
_lowerCAmelCase : int = type_vocab_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = use_entity_aware_attention
_lowerCAmelCase : Union[str, Any] = classifier_dropout
| 370 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Any = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = NllbTokenizer
__magic_name__ = []
__magic_name__ = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
_lowerCAmelCase : Dict = legacy_behaviour
super().__init__(
vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : int = False if not self.vocab_file else True
_lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_lowerCAmelCase : Any = {
lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn'
_lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang )
_lowerCAmelCase : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCAmelCase : Optional[Any] = src_lang
_lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
_lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ )
_lowerCAmelCase : Optional[Any] = tgt_lang_id
return inputs
def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = src_lang
_lowerCAmelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )
def a ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def a ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ )
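        # In legacy mode no prefix is used and the language code is appended after </s>;
        # otherwise the language code is prepended and </s> is the only suffix.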
if self.legacy_behaviour:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : int = [self.eos_token_id]
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ )
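        # Same layout as for the source language: suffix placement in legacy mode,
        # prefix placement otherwise.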
if self.legacy_behaviour:
_lowerCAmelCase : int = []
_lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : List[str] = [self.eos_token_id]
_lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_lowerCAmelCase : Union[str, Any] = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
| 25 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = 1
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
def a ( self ):
'''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
| 371 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def test_custom_files_are_present(transformers_path):
    """simple docstring"""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
| 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
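    # A note on the pattern above: with `_LazyModule`, submodules such as
    # `modeling_bert` are only imported on first attribute access (e.g.
    # `transformers.models.bert.BertModel`), so importing this package does not
    # pull in torch, TF or Flax up front.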
| 350 |
'''simple docstring'''
def palindromic_string(input_string: str) -> str:
    """simple docstring"""
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ''
    output_string = ''
    # append each character followed by "|" for all but the last character
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append the last character
    new_input_string += input_string[-1]
    # we will store the start and end of the previous furthest-ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string) )]
    # for each character in new_input_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update l and r to the bounds of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
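    # A quick illustrative check (hand-verified, not from the original file): the
    # longest palindromic substring of "abacab" is "bacab", and Manacher's algorithm
    # finds it in O(len(input_string)) time overall.
    print(palindromic_string('abacab'))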
| 25 | 0 |
'''simple docstring'''
def equation(x: float) -> float:
    """simple docstring"""
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    """simple docstring"""
    # bisection requires opposite signs at the interval endpoints
    if equation(a ) * equation(b ) >= 0:
        raise ValueError('Wrong space!' )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
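    # Both calls above bracket the positive root of 10 - x * x, so each prints an
    # approximation of sqrt(10) ~= 3.1623 (the loop halts once the bracket is
    # narrower than 0.01).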
| 351 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = 0
__magic_name__ = False
__magic_name__ = 3.0
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_lowerCAmelCase : Dict = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_lowerCAmelCase : str = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , snake_case__ )
@require_multi_gpu
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case__ , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 25 | 0 |
'''simple docstring'''
def actual_power(a: int, b: int):
    """simple docstring"""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power(a: int, b: int) -> float:
    """simple docstring"""
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
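    # Worked example: power(-2, -3) = 1 / (-2) ** 3 = -0.125; the negative exponent
    # is handled by inverting the recursive result. An extra illustrative check:
    print(power(2, 10))  # 1024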
| 352 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "trajectory_transformer"
__magic_name__ = ["past_key_values"]
__magic_name__ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , snake_case__=100 , snake_case__=5 , snake_case__=1 , snake_case__=1 , snake_case__=249 , snake_case__=6 , snake_case__=17 , snake_case__=25 , snake_case__=4 , snake_case__=4 , snake_case__=128 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0006 , snake_case__=512 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=True , snake_case__=1 , snake_case__=5_0256 , snake_case__=5_0256 , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = action_weight
_lowerCAmelCase : Optional[int] = reward_weight
_lowerCAmelCase : Union[str, Any] = value_weight
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Tuple = block_size
_lowerCAmelCase : List[Any] = action_dim
_lowerCAmelCase : List[Any] = observation_dim
_lowerCAmelCase : Union[str, Any] = transition_dim
_lowerCAmelCase : Tuple = learning_rate
_lowerCAmelCase : int = n_layer
_lowerCAmelCase : Any = n_head
_lowerCAmelCase : Tuple = n_embd
_lowerCAmelCase : Optional[Any] = embd_pdrop
_lowerCAmelCase : Union[str, Any] = attn_pdrop
_lowerCAmelCase : Any = resid_pdrop
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : List[Any] = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = kaiming_initializer_range
_lowerCAmelCase : List[Any] = use_cache
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
| 25 | 0 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
lowerCAmelCase : List[str] = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "efficientformer"
def __init__( self , snake_case__ = [3, 2, 6, 4] , snake_case__ = [48, 96, 224, 448] , snake_case__ = [True, True, True, True] , snake_case__ = 448 , snake_case__ = 32 , snake_case__ = 4 , snake_case__ = 7 , snake_case__ = 5 , snake_case__ = 8 , snake_case__ = 4 , snake_case__ = 0.0 , snake_case__ = 16 , snake_case__ = 3 , snake_case__ = 3 , snake_case__ = 3 , snake_case__ = 2 , snake_case__ = 1 , snake_case__ = 0.0 , snake_case__ = 1 , snake_case__ = True , snake_case__ = True , snake_case__ = 1E-5 , snake_case__ = "gelu" , snake_case__ = 0.02 , snake_case__ = 1E-12 , snake_case__ = 224 , snake_case__ = 1E-05 , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : Dict = hidden_sizes
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : int = num_channels
_lowerCAmelCase : List[Any] = depths
_lowerCAmelCase : Dict = mlp_expansion_ratio
_lowerCAmelCase : Tuple = downsamples
_lowerCAmelCase : Optional[int] = dim
_lowerCAmelCase : str = key_dim
_lowerCAmelCase : Any = attention_ratio
_lowerCAmelCase : Any = resolution
_lowerCAmelCase : str = pool_size
_lowerCAmelCase : Optional[int] = downsample_patch_size
_lowerCAmelCase : Optional[int] = downsample_stride
_lowerCAmelCase : Dict = downsample_pad
_lowerCAmelCase : Any = drop_path_rate
_lowerCAmelCase : Union[str, Any] = num_metaad_blocks
_lowerCAmelCase : Any = distillation
_lowerCAmelCase : Tuple = use_layer_scale
_lowerCAmelCase : Optional[int] = layer_scale_init_value
_lowerCAmelCase : List[Any] = image_size
_lowerCAmelCase : Tuple = batch_norm_eps
| 353 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
lowerCAmelCase : Union[str, Any] = 25_00_04
lowerCAmelCase : int = 25_00_20
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = MBartaaTokenizer
__magic_name__ = MBartaaTokenizerFast
__magic_name__ = True
__magic_name__ = True
def a ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : List[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = '<s>'
_lowerCAmelCase : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case__ ) , 1054 )
def a ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = MBartaaTokenizer(snake_case__ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=snake_case__ )
_lowerCAmelCase : Any = tokenizer.tokenize('This is a test' )
self.assertListEqual(snake_case__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def a ( self ):
'''simple docstring'''
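        # fmt: off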
_lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def a ( self ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.save_pretrained(snake_case__ )
                # Checks it saves the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_lowerCAmelCase : Any = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : List[str] = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=True
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Any = tokenizer_p.save_pretrained(snake_case__ )
                # Checks it saves the same files
self.assertSequenceEqual(snake_case__ , snake_case__ )
# Checks everything loads correctly in the same way
_lowerCAmelCase : Dict = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
# Save tokenizer rust, legacy_format=False
_lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
_lowerCAmelCase : int = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ )
_lowerCAmelCase : Tuple = tokenizer_p.save_pretrained(snake_case__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowerCAmelCase : int = tokenizer_r.from_pretrained(snake_case__ )
_lowerCAmelCase : str = tokenizer_p.from_pretrained(snake_case__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(snake_case__ , snake_case__ ) )
shutil.rmtree(snake_case__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ = "facebook/mbart-large-50-one-to-many-mmt"
__magic_name__ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
__magic_name__ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
__magic_name__ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
def a ( cls ):
'''simple docstring'''
_lowerCAmelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
_lowerCAmelCase : Dict = 1
return cls
def a ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038 )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertIn(snake_case__ , self.tokenizer.all_special_ids )
_lowerCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
_lowerCAmelCase : List[str] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
_lowerCAmelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
self.assertNotIn(self.tokenizer.eos_token , snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , snake_case__ )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : Any = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0]
self.assertEqual(ids[0] , snake_case__ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
def a ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_0053, 25_0001] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(snake_case__ )
_lowerCAmelCase : Tuple = MBartaaTokenizer.from_pretrained(snake_case__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors='pt' )
_lowerCAmelCase : Optional[int] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
_lowerCAmelCase : int = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_lowerCAmelCase : Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , snake_case__ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors='pt' )
_lowerCAmelCase : str = self.tokenizer(
text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , return_tensors='pt' )
_lowerCAmelCase : List[Any] = targets['input_ids']
_lowerCAmelCase : Any = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(snake_case__ ) , {
# en_XX, A, test, EOS
'input_ids': [[25_0004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_0001,
} , )
| 25 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = DiTPipeline
__magic_name__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__magic_name__ = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
__magic_name__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__magic_name__ = False
def a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=snake_case__ , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=snake_case__ , )
_lowerCAmelCase : Tuple = AutoencoderKL()
_lowerCAmelCase : str = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def a ( self , snake_case__ , snake_case__=0 ):
'''simple docstring'''
if str(snake_case__ ).startswith('mps' ):
_lowerCAmelCase : Tuple = torch.manual_seed(snake_case__ )
else:
_lowerCAmelCase : Dict = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_lowerCAmelCase : Union[str, Any] = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = 'cpu'
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Dict = self.pipeline_class(**snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(snake_case__ )
_lowerCAmelCase : List[Any] = pipe(**snake_case__ ).images
_lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_lowerCAmelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case__ , 1E-3 )
def a ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=snake_case__ , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def a ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_lowerCAmelCase : int = ['vase', 'umbrella', 'white shark', 'white wolf']
_lowerCAmelCase : Dict = pipe.get_label_ids(snake_case__ )
_lowerCAmelCase : Union[str, Any] = pipe(snake_case__ , generator=snake_case__ , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(snake_case__ , snake_case__ ):
_lowerCAmelCase : Optional[int] = load_numpy(
F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1E-2
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_lowerCAmelCase : Optional[int] = ['vase', 'umbrella']
_lowerCAmelCase : List[Any] = pipe.get_label_ids(snake_case__ )
_lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
_lowerCAmelCase : Dict = pipe(snake_case__ , generator=snake_case__ , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(snake_case__ , snake_case__ ):
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
F'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 354 |
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    """simple docstring"""
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution(max_prime: int = 10**6 ) -> int:
    """simple docstring"""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    # prime_candidate walks the differences of consecutive cubes,
    # (k + 1) ** 3 - k ** 3 = 3 * k ** 2 + 3 * k + 1, i.e. 7, 19, 37, 61, ...
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 25 | 0 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """simple docstring"""
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board )
    # If row equals the size of the board, there is a queen in every row of the
    # current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
        return
    # We iterate over each column in the row to find all valid placements
    for col in range(n ):
        # We apply what we learned previously. First we check that col is not already
        # in the current board (possible_board), because a repeated column value
        # means a vertical collision. Then we apply the two formulas we learned
        # before:
        #
        # 45º: y - x = b or row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # and we verify that the results of these two formulas do not already appear
        # in diagonal_right_collisions and diagonal_left_collisions, respectively.
        #
        # If any of these checks is true, there is a collision, so we continue to
        # the next column in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # Otherwise we recurse with the updated board and collision lists
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution(n: int) -> None:
    """simple docstring"""
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print('' )
    print(len(boards ) , 'solutions were found.' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
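    # For n = 4 this prints the two classic solutions. As a worked check of the
    # diagonal formulas: in the solution [1, 3, 0, 2], the queens at (0, 1) and
    # (1, 3) give row - col values -1 and -2 and row + col values 1 and 4, so
    # neither diagonal collides.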
| 355 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "mvp"
__magic_name__ = ["past_key_values"]
__magic_name__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , snake_case__=5_0267 , snake_case__=1024 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=12 , snake_case__=4096 , snake_case__=16 , snake_case__=0.0 , snake_case__=0.0 , snake_case__="gelu" , snake_case__=1024 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=0.0 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__=True , snake_case__=2 , snake_case__=2 , snake_case__=False , snake_case__=100 , snake_case__=800 , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = max_position_embeddings
_lowerCAmelCase : Optional[Any] = d_model
_lowerCAmelCase : Optional[int] = encoder_ffn_dim
_lowerCAmelCase : Optional[int] = encoder_layers
_lowerCAmelCase : Any = encoder_attention_heads
_lowerCAmelCase : Any = decoder_ffn_dim
_lowerCAmelCase : Optional[Any] = decoder_layers
_lowerCAmelCase : int = decoder_attention_heads
_lowerCAmelCase : Union[str, Any] = dropout
_lowerCAmelCase : List[Any] = attention_dropout
_lowerCAmelCase : List[str] = activation_dropout
_lowerCAmelCase : Optional[Any] = activation_function
_lowerCAmelCase : Any = init_std
_lowerCAmelCase : Any = encoder_layerdrop
_lowerCAmelCase : Union[str, Any] = decoder_layerdrop
_lowerCAmelCase : Optional[int] = classifier_dropout
_lowerCAmelCase : List[Any] = use_cache
_lowerCAmelCase : Optional[int] = encoder_layers
_lowerCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCAmelCase : Optional[Any] = use_prompt
_lowerCAmelCase : Optional[Any] = prompt_length
_lowerCAmelCase : Any = prompt_mid_dim
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , forced_eos_token_id=snake_case__ , **snake_case__ , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , snake_case__ ):
_lowerCAmelCase : Any = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'The config can simply be saved and uploaded again to be fixed.' )
| 25 | 0 |
'''simple docstring'''
from math import sqrt
def lowercase (_A ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(_A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase (_A = 1_0_0_0_1 ):
"""simple docstring"""
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Optional[Any] = 1
while count != nth and number < 3:
number += 1
if is_prime(_A ):
count += 1
while count != nth:
number += 2
if is_prime(_A ):
count += 1
return number
if __name__ == "__main__":
print(F'''{solution() = }''')
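# Illustrative checks (added here, not in the original snippet): the 6k +/- 1
# trial division only tests divisors 5, 7, 11, 13, ... after the special cases.
assert is_prime(97)  # 97 has no divisor of the form 6k +/- 1 below sqrt(97)
assert not is_prime(91)  # 91 = 7 * 13, caught at i = 5 via 91 % (5 + 2) == 0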
| 356 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = {
"""7B""": 1_10_08,
"""13B""": 1_38_24,
"""30B""": 1_79_20,
"""65B""": 2_20_16,
"""70B""": 2_86_72,
}
lowerCAmelCase : Optional[int] = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def lowercase (_A , _A=1 , _A=2_5_6 ):
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
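# Worked example (added for clarity; `compute_intermediate_size` is the
# conceptual name of the function above): with n = 4096, ffn_dim_multiplier = 1
# and multiple_of = 256, int(8 * 4096 / 3) = 10922, which rounds up to the next
# multiple of 256: ((10922 + 255) // 256) * 256 = 11008 -- the "7B" value in the
# dimension table at the top of this script.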
def lowercase (_A ):
"""simple docstring"""
with open(_A , 'r' ) as f:
return json.load(_A )
def lowercase (_A , _A ):
"""simple docstring"""
with open(_A , 'w' ) as f:
json.dump(_A , _A )
def lowercase (_A , _A , _A , _A=True ):
"""simple docstring"""
os.makedirs(_A , exist_ok=_A )
_lowerCAmelCase : Optional[Any] = os.path.join(_A , 'tmp' )
os.makedirs(_A , exist_ok=_A )
_lowerCAmelCase : Any = read_json(os.path.join(_A , 'params.json' ) )
_lowerCAmelCase : List[str] = NUM_SHARDS[model_size]
_lowerCAmelCase : str = params['n_layers']
_lowerCAmelCase : Optional[int] = params['n_heads']
_lowerCAmelCase : int = n_heads // num_shards
_lowerCAmelCase : Optional[int] = params['dim']
_lowerCAmelCase : Union[str, Any] = dim // n_heads
_lowerCAmelCase : Union[str, Any] = 10_000.0
_lowerCAmelCase : str = 1.0 / (base ** (torch.arange(0 , _A , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_lowerCAmelCase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA
_lowerCAmelCase : str = n_heads_per_shard // num_key_value_heads
_lowerCAmelCase : Optional[int] = dim // num_key_value_heads
else: # compatibility with other checkpoints
_lowerCAmelCase : Union[str, Any] = n_heads
_lowerCAmelCase : Any = n_heads_per_shard
_lowerCAmelCase : Optional[Any] = dim
# permute for sliced rotary
def permute(_A , _A=n_heads , _A=dim , _A=dim ):
return w.view(_A , dima // n_heads // 2 , 2 , _A ).transpose(1 , 2 ).reshape(_A , _A )
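# (Added note, reconstructed from context: in the de-obfuscated original this
#  function reads `def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):` and
#  returns `w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)`,
#  interleaving the two rotary halves into the sliced-rotary layout.)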
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowerCAmelCase : List[Any] = torch.load(os.path.join(_A , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
_lowerCAmelCase : List[Any] = [
torch.load(os.path.join(_A , f'consolidated.{i:02d}.pth' ) , map_location='cpu' )
for i in range(_A )
]
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Union[str, Any] = {'weight_map': {}}
for layer_i in range(_A ):
_lowerCAmelCase : List[str] = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, so saving attention_norm and ffn_norm would save the other weights too, which is
# redundant as those weights will be stitched from multiple shards. To avoid that, they are cloned.
_lowerCAmelCase : str = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
_lowerCAmelCase : List[str] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) )
_lowerCAmelCase : Optional[int] = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , )
_lowerCAmelCase : Dict = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A )
_lowerCAmelCase : Dict = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : Tuple = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(_A )] , dim=1 )
_lowerCAmelCase : List[Any] = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(_A )] , dim=0 )
_lowerCAmelCase : int = inv_freq
for k, v in state_dict.items():
_lowerCAmelCase : Optional[Any] = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
_lowerCAmelCase : Dict = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase : List[str] = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
_lowerCAmelCase : List[str] = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(_A )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(_A )] , dim=0 ),
}
for k, v in state_dict.items():
_lowerCAmelCase : int = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
# Write configs
_lowerCAmelCase : Tuple = {'total_size': param_count * 2}
write_json(_A , os.path.join(_A , 'pytorch_model.bin.index.json' ) )
_lowerCAmelCase : Optional[int] = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
_lowerCAmelCase : int = params['multiple_of'] if 'multiple_of' in params else 2_5_6
_lowerCAmelCase : List[Any] = LlamaConfig(
hidden_size=_A , intermediate_size=compute_intermediate_size(_A , _A , _A ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=_A , )
config.save_pretrained(_A )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
_lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained(_A , torch_dtype=torch.floataa , low_cpu_mem_usage=_A )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(_A , safe_serialization=_A )
shutil.rmtree(_A )
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
_lowerCAmelCase : List[Any] = tokenizer_class(_A )
tokenizer.save_pretrained(_A )
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=_A , help='Whether or not to save using `safetensors`.' )
_lowerCAmelCase : Any = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
_lowerCAmelCase : Dict = os.path.join(args.input_dir , 'tokenizer.model' )
write_tokenizer(args.output_dir , _A )
if __name__ == "__main__":
main()
| 25 | 0 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowercase ():
"""simple docstring"""
raise RuntimeError('CUDA out of memory.' )
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(3 , 4 )
_lowerCAmelCase : str = nn.BatchNormad(4 )
_lowerCAmelCase : int = nn.Linear(4 , 5 )
def a ( self , snake_case__ ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(snake_case__ ) ) )
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(snake_case__ ):
nonlocal batch_sizes
batch_sizes.append(snake_case__ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(snake_case__ , [128, 64, 32, 16, 8] )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(snake_case__ , snake_case__ ):
nonlocal batch_sizes
batch_sizes.append(snake_case__ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
_lowerCAmelCase : str = mock_training_loop_function('hello' )
self.assertListEqual(snake_case__ , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def a ( self ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(snake_case__ ):
pass
with self.assertRaises(snake_case__ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def a ( self ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(snake_case__ ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(snake_case__ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def a ( self ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(snake_case__ , snake_case__ , snake_case__ ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(snake_case__ ) as cm:
mock_training_loop_function(128 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def a ( self ):
'''simple docstring'''
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(snake_case__ ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(snake_case__ ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch.cuda.memory_allocated()
_lowerCAmelCase : Union[str, Any] = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , snake_case__ )
_lowerCAmelCase : List[Any] = release_memory(snake_case__ )
self.assertEqual(torch.cuda.memory_allocated() , snake_case__ )
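# Minimal usage sketch (an illustration added to this test file, not part of
# the original sources): the decorator retries the wrapped function, halving
# `batch_size` after every CUDA out-of-memory error until a call succeeds or
# the batch size reaches zero.
#
# @find_executable_batch_size(starting_batch_size=128)
# def training_function(batch_size):
#     ...  # train; an OOM here triggers a retry with batch_size // 2
#
# training_function()  # called without arguments; the decorator injects batch_size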
| 357 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = True
__magic_name__ = None
__magic_name__ = 1
__magic_name__ = None
__magic_name__ = False
__magic_name__ = None
__magic_name__ = None
def a ( self ):
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(snake_case__ ) for k, v in self.__dict__.items()} )
| 25 | 0 |
'''simple docstring'''
import requests
lowerCAmelCase : Optional[Any] = """YOUR API KEY"""
def lowercase (_A , _A = giphy_api_key ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = '+'.join(query.split() )
_lowerCAmelCase : Any = f'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
_lowerCAmelCase : Dict = requests.get(_A ).json()['data']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 358 |
'''simple docstring'''
lowerCAmelCase : List[str] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCAmelCase : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCAmelCase : List[str] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 25 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase : Any = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "visual_bert"
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=512 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=False , snake_case__=True , snake_case__=1 , snake_case__=0 , snake_case__=2 , **snake_case__ , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : Dict = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : Any = visual_embedding_dim
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Tuple = type_vocab_size
_lowerCAmelCase : str = layer_norm_eps
_lowerCAmelCase : int = bypass_transformer
_lowerCAmelCase : Union[str, Any] = special_visual_initialize
| 359 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : List[Any] = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Any = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 360 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "nat"
__magic_name__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , snake_case__=4 , snake_case__=3 , snake_case__=64 , snake_case__=[3, 4, 6, 5] , snake_case__=[2, 4, 8, 16] , snake_case__=7 , snake_case__=3.0 , snake_case__=True , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=None , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : Any = depths
_lowerCAmelCase : Dict = len(snake_case__ )
_lowerCAmelCase : str = num_heads
_lowerCAmelCase : Dict = kernel_size
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : int = qkv_bias
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Dict = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
_lowerCAmelCase : Any = layer_scale_init_value
_lowerCAmelCase : Any = ['stem'] + [F'stage{idx}' for idx in range(1 , len(snake_case__ ) + 1 )]
_lowerCAmelCase , _lowerCAmelCase : str = get_aligned_output_features_output_indices(
out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
| 25 | 0 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase : Optional[int] = 'pt'
_lowerCAmelCase : Tuple = 'tf'
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(snake_case__ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ )
model_tf.save_pretrained(snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = 'mock_framework'
# Framework provided - return whatever the user provides
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(snake_case__ )
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(snake_case__ )
_lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
def a ( self ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(snake_case__ )
_lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ )
self.assertEqual(snake_case__ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(snake_case__ )
_lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ )
self.assertEqual(snake_case__ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(snake_case__ ):
_lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ):
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase : Any = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_torch_available' , snake_case__ ):
_lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase : int = MagicMock(return_value=snake_case__ )
_lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch(
'transformers.onnx.features.is_torch_available' , snake_case__ ):
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case__ , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase : str = MagicMock(return_value=snake_case__ )
_lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ )
with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch(
'transformers.onnx.features.is_torch_available' , snake_case__ ):
with self.assertRaises(snake_case__ ):
_lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
| 361 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : str = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""roberta-base""": 5_12,
"""roberta-large""": 5_12,
"""roberta-large-mnli""": 5_12,
"""distilroberta-base""": 5_12,
"""roberta-base-openai-detector""": 5_12,
"""roberta-large-openai-detector""": 5_12,
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = RobertaTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="replace" , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=False , snake_case__=True , **snake_case__ , ):
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : Tuple = getattr(snake_case__ , pre_tok_state.pop('type' ) )
_lowerCAmelCase : List[Any] = add_prefix_space
_lowerCAmelCase : List[str] = pre_tok_class(**snake_case__ )
_lowerCAmelCase : Union[str, Any] = add_prefix_space
_lowerCAmelCase : Union[str, Any] = 'post_processor'
_lowerCAmelCase : int = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
_lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_lowerCAmelCase : Any = tuple(state['sep'] )
if "cls" in state:
_lowerCAmelCase : str = tuple(state['cls'] )
_lowerCAmelCase : List[str] = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
_lowerCAmelCase : int = add_prefix_space
_lowerCAmelCase : Tuple = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
_lowerCAmelCase : Union[str, Any] = trim_offsets
_lowerCAmelCase : Optional[int] = True
if changes_to_apply:
_lowerCAmelCase : Any = getattr(snake_case__ , state.pop('type' ) )
_lowerCAmelCase : Optional[int] = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
def a ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
_lowerCAmelCase : Tuple = value
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = kwargs.get('is_split_into_words' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : int = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def a ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 25 | 0 |
'''simple docstring'''
def lowercase (_A , _A ):
"""simple docstring"""
while second != 0:
_lowerCAmelCase : List[Any] = first & second
first ^= second
_lowerCAmelCase : str = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : List[str] = int(input("""Enter the first number: """).strip())
lowerCAmelCase : List[str] = int(input("""Enter the second number: """).strip())
print(F'''{add(first, second) = }''')
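# Worked trace (added for illustration): add(5, 3)
#   first = 0b0101, second = 0b0011 -> carry = 0b0001, first = 0b0110, second = 0b0010
#   first = 0b0110, second = 0b0010 -> carry = 0b0010, first = 0b0100, second = 0b0100
#   first = 0b0100, second = 0b0100 -> carry = 0b0100, first = 0b0000, second = 0b1000
#   first = 0b0000, second = 0b1000 -> carry = 0b0000, first = 0b1000, second = 0b0000
# second == 0, so the loop exits and add(5, 3) returns 0b1000 == 8.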
| 362 |
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = 0 # The first color of the flag.
lowerCAmelCase : Optional[int] = 1 # The second color of the flag.
lowerCAmelCase : int = 2 # The third color of the flag.
lowerCAmelCase : Any = (red, white, blue)
def lowercase (_A ):
"""simple docstring"""
if not sequence:
return []
if len(_A ) == 1:
return list(_A )
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : List[str] = len(_A ) - 1
_lowerCAmelCase : Optional[Any] = 0
while mid <= high:
if sequence[mid] == colors[0]:
_lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_lowerCAmelCase , _lowerCAmelCase : Tuple = sequence[high], sequence[mid]
high -= 1
else:
_lowerCAmelCase : Optional[int] = f'The elements inside the sequence must contain only {colors} values'
raise ValueError(_A )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : str = input("""Enter numbers separated by commas:\n""").strip()
lowerCAmelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")]
print(F'''{dutch_national_flag_sort(unsorted)}''')
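# Example pass (added for illustration): dutch_national_flag_sort([2, 0, 1, 0, 2])
# swaps 0s toward the front (low pointer) and 2s toward the back (high pointer)
# in a single scan, yielding [0, 0, 1, 2, 2].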
| 25 | 0 |
'''simple docstring'''
def lowercase (_A = 2_0_0 ):
"""simple docstring"""
_lowerCAmelCase : Tuple = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
_lowerCAmelCase : List[str] = [0] * (pence + 1)
_lowerCAmelCase : Tuple = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(_A , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
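# Worked example (illustrative addition): with only the coins [1, 2, 5] and
# pence = 5 the same table yields 4 ways -- 5, 2+2+1, 2+1+1+1 and 1+1+1+1+1 --
# because each coin pass adds number_of_ways[i - coin] into number_of_ways[i].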
| 363 |
'''simple docstring'''
def lowercase ():
"""simple docstring"""
_lowerCAmelCase : Optional[int] = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
_lowerCAmelCase : int = 6
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : Optional[int] = 1_9_0_1
_lowerCAmelCase : Optional[Any] = 0
while year < 2_0_0_1:
day += 7
if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
_lowerCAmelCase : List[str] = day - days_per_month[month - 2]
elif day > 2_9 and month == 2:
month += 1
_lowerCAmelCase : List[str] = day - 2_9
else:
if day > days_per_month[month - 1]:
month += 1
_lowerCAmelCase : List[str] = day - days_per_month[month - 2]
if month > 1_2:
year += 1
_lowerCAmelCase : Optional[int] = 1
if year < 2_0_0_1 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
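# Leap-year rule used above, spelled out (added note): 1904 is a leap year
# (divisible by 4 but not by 100), 1900 is not (divisible by 100 but not by
# 400), and 2000 is (divisible by 400).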
| 25 | 0 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def lowercase (_A ):
"""simple docstring"""
if not sentence:
return ""
_lowerCAmelCase : str = dict(zip(_A , _A ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 364 |
'''simple docstring'''
def lowercase (_A = 1_0_0_0_0_0_0 ):
"""simple docstring"""
_lowerCAmelCase : Any = set(range(3 , _A , 2 ) )
primes.add(2 )
for p in range(3 , _A , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , _A , _A ) ) )
_lowerCAmelCase : Union[str, Any] = [float(_A ) for n in range(limit + 1 )]
for p in primes:
for n in range(_A , limit + 1 , _A ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
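# Sanity check (an added note): the sieve applies Euler's product formula
# phi(n) = n * prod(1 - 1/p) over the distinct primes p dividing n, so for
# limit = 10 the sum phi(2) + ... + phi(10) = 1+2+2+4+2+6+4+6+4 = 31.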
| 25 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
lowerCAmelCase : Optional[int] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Optional[Any] = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[Any] = {
"""google/electra-small-generator""": 5_12,
"""google/electra-base-generator""": 5_12,
"""google/electra-large-generator""": 5_12,
"""google/electra-small-discriminator""": 5_12,
"""google/electra-base-discriminator""": 5_12,
"""google/electra-large-discriminator""": 5_12,
}
lowerCAmelCase : Dict = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_INIT_CONFIGURATION
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = ElectraTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=True , snake_case__="[UNK]" , snake_case__="[SEP]" , snake_case__="[PAD]" , snake_case__="[CLS]" , snake_case__="[MASK]" , snake_case__=True , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , )
_lowerCAmelCase : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , snake_case__ ) != do_lower_case
or normalizer_state.get('strip_accents' , snake_case__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , snake_case__ ) != tokenize_chinese_chars
):
_lowerCAmelCase : Tuple = getattr(snake_case__ , normalizer_state.pop('type' ) )
_lowerCAmelCase : List[Any] = do_lower_case
_lowerCAmelCase : List[Any] = strip_accents
_lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars
_lowerCAmelCase : Optional[int] = normalizer_class(**snake_case__ )
_lowerCAmelCase : List[Any] = do_lower_case
def a ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
_lowerCAmelCase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : Any = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 365 |
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase : Tuple = """src/transformers"""
# Pattern that looks at the indentation in a line.
lowerCAmelCase : str = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase : str = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase : List[str] = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase : Optional[int] = re.compile(r"""\[([^\]]+)\]""")
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : int = _re_indent.search(_A )
return "" if search is None else search.groups()[0]
def lowercase (_A , _A="" , _A=None , _A=None ):
"""simple docstring"""
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_A ):
index += 1
_lowerCAmelCase : Dict = ['\n'.join(lines[:index] )]
else:
_lowerCAmelCase : str = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : List[Any] = [lines[index]]
index += 1
while index < len(_A ) and (end_prompt is None or not lines[index].startswith(_A )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_A ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_A ) )
if index < len(_A ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Union[str, Any] = []
else:
blocks.append('\n'.join(_A ) )
_lowerCAmelCase : List[str] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_A ) > 0:
blocks.append('\n'.join(_A ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_A ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def lowercase (_A ):
"""simple docstring"""
def _inner(_A ):
return key(_A ).lower().replace('_' , '' )
return _inner
def lowercase (_A , _A=None ):
"""simple docstring"""
def noop(_A ):
return x
if key is None:
_lowerCAmelCase : List[Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : List[Any] = [obj for obj in objects if key(_A ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Tuple = [obj for obj in objects if key(_A )[0].isupper() and not key(_A ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : List[str] = [obj for obj in objects if not key(_A )[0].isupper()]
_lowerCAmelCase : Dict = ignore_underscore(_A )
return sorted(_A , key=_A ) + sorted(_A , key=_A ) + sorted(_A , key=_A )
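# Hypothetical example of the resulting order (added for clarity): constants
# first, classes second, functions last, each group sorted case-insensitively
# while ignoring underscores, e.g.
# ["load_tool", "AGENT_TYPES", "Agent", "OUTPUT_DIR"]
# -> ["AGENT_TYPES", "OUTPUT_DIR", "Agent", "load_tool"]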
def lowercase (_A ):
"""simple docstring"""
def _replace(_A ):
_lowerCAmelCase : Dict = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : Union[str, Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_A )] ) + "]"
_lowerCAmelCase : Tuple = import_statement.split('\n' )
if len(_A ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
_lowerCAmelCase : List[str] = [(i, _re_strip_line.search(_A ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCAmelCase : Dict = sort_objects(_A , key=lambda _A : x[1] )
_lowerCAmelCase : Tuple = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_A ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : Tuple = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : List[str] = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + ', '.join([f'"{k}"' for k in sort_objects(_A )] )
return "\n".join(_A )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Union[str, Any] = _re_bracket_content.sub(_replace , _A )
return import_statement
def lowercase (_A , _A=True ):
"""simple docstring"""
with open(_A , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Tuple = split_code_in_indented_blocks(
_A , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_A ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : Tuple = main_blocks[block_idx]
_lowerCAmelCase : int = block.split('\n' )
# Get to the start of the imports.
_lowerCAmelCase : Tuple = 0
while line_idx < len(_A ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Dict = len(_A )
else:
line_idx += 1
if line_idx >= len(_A ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : str = '\n'.join(block_lines[line_idx:-1] )
_lowerCAmelCase : Tuple = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
_lowerCAmelCase : List[Any] = split_code_in_indented_blocks(_A , indent_level=_A )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : Optional[int] = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : int = [(pattern.search(_A ).groups()[0] if pattern.search(_A ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : Dict = [(i, key) for i, key in enumerate(_A ) if key is not None]
_lowerCAmelCase : Optional[int] = [x[0] for x in sorted(_A , key=lambda _A : x[1] )]
# We reorder the blocks, keeping empty lines/comments where they were and reordering the rest.
_lowerCAmelCase : int = 0
_lowerCAmelCase : Optional[Any] = []
for i in range(len(_A ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Optional[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(_A )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : Optional[int] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(_A ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(_A , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(_A ) )
def lowercase (_A=True ):
"""simple docstring"""
_lowerCAmelCase : int = []
for root, _, files in os.walk(_A ):
if "__init__.py" in files:
_lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_A , '__init__.py' ) , check_only=_A )
if result:
_lowerCAmelCase : Optional[int] = [os.path.join(_A , '__init__.py' )]
if len(_A ) > 0:
raise ValueError(f'Would overwrite {len(_A )} files, run `make style`.' )
if __name__ == "__main__":
lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowerCAmelCase : List[str] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 25 | 0 |
'''simple docstring'''
from math import factorial
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = real
if isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase : str = [1] * rank
else:
_lowerCAmelCase : List[Any] = rank
def __repr__( self ):
'''simple docstring'''
return (
F'{self.real}+'
F'{"+".join(str(snake_case__ )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
)
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , snake_case__ )
def __add__( self , snake_case__ ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
return Dual(self.real + other , self.duals )
_lowerCAmelCase : List[str] = self.duals.copy()
_lowerCAmelCase : Optional[Any] = other.duals.copy()
if len(snake_case__ ) > len(snake_case__ ):
o_dual.extend([1] * (len(snake_case__ ) - len(snake_case__ )) )
elif len(snake_case__ ) < len(snake_case__ ):
s_dual.extend([1] * (len(snake_case__ ) - len(snake_case__ )) )
_lowerCAmelCase : Union[str, Any] = []
for i in range(len(snake_case__ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , snake_case__ )
__magic_name__ = __add__
def __sub__( self , snake_case__ ):
'''simple docstring'''
return self + other * -1
def __mul__( self , snake_case__ ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase : Dict = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , snake_case__ )
_lowerCAmelCase : int = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , snake_case__ )
__magic_name__ = __mul__
def __truediv__( self , snake_case__ ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase : Tuple = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , snake_case__ )
raise ValueError
def __floordiv__( self , snake_case__ ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
_lowerCAmelCase : Any = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , snake_case__ )
raise ValueError
def __pow__( self , snake_case__ ):
'''simple docstring'''
if snake_case__ < 0 or isinstance(snake_case__ , float ):
raise ValueError('power must be a positive integer' )
if snake_case__ == 0:
return 1
if snake_case__ == 1:
return self
x = self
for _ in range(snake_case__ - 1 ):
x *= self
return x
def differentiate (func , position , order ):
"""simple docstring"""
if not callable(func ):
raise ValueError('differentiate() requires a function as input for func' )
if not isinstance(position , (float, int) ):
raise ValueError('differentiate() requires a float as input for position' )
if not isinstance(order , int ):
raise ValueError('differentiate() requires an int as input for order' )
d = Dual(position , 1 )
result = func(d )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
import doctest
doctest.testmod()
def lowercase (_A ):
"""simple docstring"""
return _A**2 * _A**4
print(differentiate(lowercase, 9, 2))
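# Worked example of the forward-mode rule implemented above: f(Dual(x, 1))
# carries the Taylor coefficients of f at x, so the order-k derivative is
# factorial(k) times the k-th dual coefficient. For f(y) = y**2 * y**4 = y**6
# at y = 9, f''(y) = 30 * y**4, hence differentiate(lowercase, 9, 2) prints
# 30 * 9**4 == 196830.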
| 366 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyV22InpaintPipeline,
KandinskyV22PriorPipeline,
UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
pipeline_class = KandinskyV22InpaintPipeline
params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
batch_params = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
required_optional_params = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
test_xformers_attention = False
@property
def text_embedder_hidden_size ( self ):
'''simple docstring'''
return 32
@property
def time_input_dim ( self ):
'''simple docstring'''
return 32
@property
def block_out_channels_a ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def time_embed_dim ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def cross_attention_dim ( self ):
'''simple docstring'''
return 100
@property
def dummy_unet ( self ):
'''simple docstring'''
torch.manual_seed(0 )
model_kwargs = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
model = UNet2DConditionModel(**model_kwargs )
return model
@property
def dummy_movq_kwargs ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def dummy_movq ( self ):
'''simple docstring'''
torch.manual_seed(0 )
model = VQModel(**self.dummy_movq_kwargs )
return model
def get_dummy_components ( self ):
'''simple docstring'''
unet = self.dummy_unet
movq = self.dummy_movq
scheduler = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='epsilon' , thresholding=False , )
components = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def get_dummy_inputs ( self , device , seed=0 ):
'''simple docstring'''
image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
device )
# create init_image
image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((256, 256) )
# create mask
mask = np.ones((64, 64) , dtype=np.float32 )
mask[:32, :32] = 0
if str(device ).startswith('mps' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def test_kandinsky_inpaint ( self ):
'''simple docstring'''
device = 'cpu'
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
output = pipe(**self.get_dummy_inputs(device ) )
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device ) , return_dict=False , )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def test_inference_batch_single_identical ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def tearDown ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_inpaint ( self ):
'''simple docstring'''
expected_image = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
init_image = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
mask = np.ones((768, 768) , dtype=np.float32 )
mask[:250, 250:-250] = 0
prompt = 'a hat'
pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
pipe_prior.to(torch_device )
pipeline = KandinskyV22InpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.float16 )
pipeline = pipeline.to(torch_device )
pipeline.set_progress_bar_config(disable=None )
generator = torch.Generator(device='cpu' ).manual_seed(0 )
image_emb , zero_image_emb = pipe_prior(
prompt , generator=generator , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
output = pipeline(
image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
image = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(image , expected_image )
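# Hedged usage sketch outside the test harness (the model ids and the two-stage
# prior -> decoder flow are taken from the slow test above; init_image / mask
# stand for any 768x768 PIL image and float mask):
#
#   pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#   pipe = KandinskyV22InpaintPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder-inpaint")
#   image_emb, negative_emb = pipe_prior("a hat").to_tuple()
#   result = pipe(image=init_image, mask_image=mask, image_embeds=image_emb,
#                 negative_image_embeds=negative_emb, height=768, width=768).images[0]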
| 25 | 0 |
'''simple docstring'''
from cv2 import destroyAllWindows, imread, imshow, waitKey
def lowercase (_A ):
"""simple docstring"""
pixel_h, pixel_v = _A.shape[0], _A.shape[1]
# converting each pixel's color to its negative
for i in range(pixel_h ):
for j in range(pixel_v ):
_A[i][j] = [2_5_5, 2_5_5, 2_5_5] - _A[i][j]
return _A
if __name__ == "__main__":
# read original image
lowerCAmelCase : Optional[int] = imread("""image_data/lena.jpg""", 1)
# convert to its negative
lowerCAmelCase : int = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
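# The pixel-by-pixel loop above is O(H*W) in pure Python; NumPy computes the
# same negative in one vectorized expression (sketch, assuming a uint8 image):
#
#   negative = 255 - img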
| 367 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def lowercase (_A ):
"""simple docstring"""
if not _A:
return 0
operations = {'+', '-', '*', '/'}
stack: list[Any] = []
for token in _A:
if token in operations:
b, a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(token ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
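# Example: "2 1 + 3 *" in postfix is (2 + 1) * 3, so
#   lowercase(["2", "1", "+", "3", "*"]) == 9
# Note the division branch truncates toward zero (a // b + 1 when the signs
# differ and there is a remainder), i.e. C-style integer division rather than
# Python's floor division.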
| 25 | 0 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
tokenizer_class = XGLMTokenizer
rust_tokenizer_class = XGLMTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def test_convert_token_and_id ( self ):
'''simple docstring'''
token = '<pad>'
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def test_get_vocab ( self ):
'''simple docstring'''
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(len(vocab_keys ) , 1008 )
def test_vocab_size ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def test_full_tokenizer ( self ):
'''simple docstring'''
tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokens = tokenizer.tokenize('This is a test' )
self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
ids = tokenizer.convert_tokens_to_ids(tokens )
self.assertListEqual(
ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
back_tokens = tokenizer.convert_ids_to_tokens(ids )
self.assertListEqual(
back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def big_tokenizer ( self ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
def test_picklable ( self ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(SAMPLE_VOCAB , f.name )
tokenizer = XGLMTokenizer(f.name , keep_accents=True )
pickled_tokenizer = pickle.dumps(tokenizer )
pickle.loads(pickled_tokenizer )
def test_rust_and_python_full_tokenizers ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = 'I was born in 92000, and this is falsé.'
tokens = tokenizer.tokenize(sequence )
rust_tokens = rust_tokenizer.tokenize(sequence )
self.assertListEqual(tokens , rust_tokens )
ids = tokenizer.encode(sequence , add_special_tokens=False )
rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
self.assertListEqual(ids , rust_ids )
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence )
rust_ids = rust_tokenizer.encode(sequence )
self.assertListEqual(ids , rust_ids )
@slow
def test_tokenization_base_easy_symbols ( self ):
'''simple docstring'''
symbols = 'Hello World!'
original_tokenizer_encodings = [2, 3_1227, 4447, 35]
self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def test_tokenization_base_hard_symbols ( self ):
'''simple docstring'''
symbols = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def test_tokenizer_integration ( self ):
'''simple docstring'''
expected_encoding = {
'input_ids': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding , model_name='facebook/xglm-564M' , padding=False , )
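# Hedged usage sketch of the real checkpoint the @slow tests above pin down
# (expected ids for "Hello World!" come straight from those tests):
#
#   tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   ids = tok.encode("Hello World!")   # -> [2, 31227, 4447, 35]
#   text = tok.decode(ids)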
| 368 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCamelCase__ ( PretrainedConfig ):
"""simple docstring"""
model_type = "mobilenet_v2"
def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.8 , initializer_range=0.02 , layer_norm_eps=0.001 , semantic_loss_ignore_index=255 , **kwargs , ):
'''simple docstring'''
super().__init__(**kwargs )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
self.num_channels = num_channels
self.image_size = image_size
self.depth_multiplier = depth_multiplier
self.depth_divisible_by = depth_divisible_by
self.min_depth = min_depth
self.expand_ratio = expand_ratio
self.output_stride = output_stride
self.first_layer_is_expansion = first_layer_is_expansion
self.finegrained_output = finegrained_output
self.hidden_act = hidden_act
self.tf_padding = tf_padding
self.classifier_dropout_prob = classifier_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.semantic_loss_ignore_index = semantic_loss_ignore_index
class UpperCamelCase__ ( OnnxConfig ):
"""simple docstring"""
torch_onnx_minimum_version = version.parse("1.11" )
@property
def inputs ( self ):
'''simple docstring'''
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def outputs ( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def atol_for_validation ( self ):
'''simple docstring'''
return 1E-4
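# Usage note: the first class above is instantiated like any PretrainedConfig
# (e.g. overriding depth_multiplier=1.4 to widen the network); the second is
# the ONNX export config, declaring one "pixel_values" input with a dynamic
# batch axis and a 1e-4 numerical-validation tolerance for the exported graph.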
| 25 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def lowercase (_A , _A ):
"""simple docstring"""
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCamelCase__ ( TestCase ):
"""simple docstring"""
def setUp ( self ):
'''simple docstring'''
self.test_model = SMALL_MODEL_IDENTIFIER
self.framework_pt = 'pt'
self.framework_tf = 'tf'
def _setup_pt_ckpt ( self , snake_case__ ):
'''simple docstring'''
model_pt = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(snake_case__ )
def _setup_tf_ckpt ( self , snake_case__ ):
'''simple docstring'''
model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
model_tf.save_pretrained(snake_case__ )
def test_framework_provided ( self ):
'''simple docstring'''
mock_framework = 'mock_framework'
# Framework provided - return whatever the user provides
framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
self.assertEqual(framework , mock_framework )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(local_pt_ckpt )
framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
self.assertEqual(framework , mock_framework )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(local_tf_ckpt )
framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
self.assertEqual(framework , mock_framework )
def test_checkpoint_provided ( self ):
'''simple docstring'''
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(local_pt_ckpt )
framework = FeaturesManager.determine_framework(local_pt_ckpt )
self.assertEqual(framework , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(local_tf_ckpt )
framework = FeaturesManager.determine_framework(local_tf_ckpt )
self.assertEqual(framework , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(RuntimeError ):
framework = FeaturesManager.determine_framework(local_invalid_ckpt )
def test_from_environment ( self ):
'''simple docstring'''
# TensorFlow not in environment -> use PyTorch
mock_tf_available = MagicMock(return_value=False )
with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ):
framework = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(framework , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
mock_torch_available = MagicMock(return_value=False )
with patch('transformers.onnx.features.is_torch_available' , mock_torch_available ):
framework = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(framework , self.framework_tf )
# Both in environment -> use PyTorch
mock_tf_available = MagicMock(return_value=True )
mock_torch_available = MagicMock(return_value=True )
with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ), patch(
'transformers.onnx.features.is_torch_available' , mock_torch_available ):
framework = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(framework , self.framework_pt )
# Both not in environment -> raise error
mock_tf_available = MagicMock(return_value=False )
mock_torch_available = MagicMock(return_value=False )
with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ), patch(
'transformers.onnx.features.is_torch_available' , mock_torch_available ):
with self.assertRaises(EnvironmentError ):
framework = FeaturesManager.determine_framework(self.test_model )
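# Resolution order the tests above pin down:
#   1. an explicit framework argument always wins;
#   2. else a local checkpoint decides, based on which weights it contains;
#   3. else whichever of PyTorch/TensorFlow is importable decides (PyTorch preferred);
#   4. if neither is available, an error is raised.
# e.g. framework = FeaturesManager.determine_framework("path_or_model_id")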
| 25 | 0 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform (number_of_qubits = 3 ):
"""simple docstring"""
if isinstance(number_of_qubits , str ):
raise TypeError('number of qubits must be a integer.' )
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.' )
if math.floor(number_of_qubits ) != number_of_qubits:
raise ValueError('number of qubits must be exact integer.' )
if number_of_qubits > 1_0:
raise ValueError('number of qubits too large to simulate(>10).' )
qr = QuantumRegister(number_of_qubits , 'qr' )
cr = ClassicalRegister(number_of_qubits , 'cr' )
quantum_circuit = QuantumCircuit(qr , cr )
counter = number_of_qubits
for i in range(number_of_qubits ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(counter ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(k , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(qr , cr )
# simulate with 10000 shots
backend = Aer.get_backend('qasm_simulator' )
job = execute(quantum_circuit , backend , shots=1_0_0_0_0 )
return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
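# What the circuit implements: on an n-qubit basis state |x>, the QFT prepares
#   (1 / sqrt(2**n)) * sum_k exp(2*pi*i * x * k / 2**n) |k>
# via Hadamards plus controlled-phase rotations cp(pi / 2**(counter - j)), with
# a final swap network to undo the bit reversal. Starting from |000>, all
# 2**3 = 8 outcomes are equally likely, so each bitstring should collect
# roughly 10000 / 8 = 1250 of the shots above.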
| 370 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
NllbTokenizer = None
logger = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class UpperCamelCase__ ( PreTrainedTokenizerFast ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = NllbTokenizer
prefix_tokens = []
suffix_tokens = []
def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ):
'''simple docstring'''
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
self.legacy_behaviour = legacy_behaviour
super().__init__(
vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **kwargs , )
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
_additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
self.lang_code_to_id = {
lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def src_lang ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def src_lang ( self , new_src_lang ):
'''simple docstring'''
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def build_inputs_with_special_tokens ( self , token_ids_0 , token_ids_1 = None ):
'''simple docstring'''
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0 + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def create_token_type_ids_from_sequences ( self , token_ids_0 , token_ids_1 = None ):
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def _build_translation_inputs ( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
self.src_lang = src_lang
inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
inputs['forced_bos_token_id'] = tgt_lang_id
return inputs
def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = src_lang
_lowerCAmelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(snake_case__ , snake_case__ , **snake_case__ )
def a ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def a ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def set_src_lang_special_tokens ( self , src_lang ):
'''simple docstring'''
self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
if self.legacy_behaviour:
self.prefix_tokens = []
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
else:
self.prefix_tokens = [self.cur_lang_code]
self.suffix_tokens = [self.eos_token_id]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
self._tokenizer.post_processor = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def set_tgt_lang_special_tokens ( self , lang ):
'''simple docstring'''
self.cur_lang_code = self.convert_tokens_to_ids(lang )
if self.legacy_behaviour:
self.prefix_tokens = []
self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
else:
self.prefix_tokens = [self.cur_lang_code]
self.suffix_tokens = [self.eos_token_id]
prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
self._tokenizer.post_processor = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def save_vocabulary ( self , save_directory , filename_prefix = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(save_directory ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
return (out_vocab_file,)
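# Hedged usage sketch for translation-style encoding (checkpoint id from the
# pretrained maps above; upstream this fast tokenizer class is named
# NllbTokenizerFast). The forced-BOS trick is how NLLB picks the target
# language at generation time:
#
#   tok = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M")
#   tok.src_lang = "eng_Latn"
#   batch = tok("Hello", return_tensors="pt")
#   forced_bos = tok.convert_tokens_to_ids("fra_Latn")  # pass as forced_bos_token_id to model.generate()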
| 25 | 0 |
'''simple docstring'''
alphabet_size = 2_56
# Modulus to hash a string
modulus = 1_00_00_03
def rabin_karp (pattern , text ):
"""simple docstring"""
p_len = len(pattern )
t_len = len(text )
if p_len > t_len:
return False
p_hash = 0
text_hash = 0
modulus_power = 1
# Calculating the hash of pattern and substring of text
for i in range(p_len ):
p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
modulus_power = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
text_hash = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def test_rabin_karp ():
"""simple docstring"""
# Test 1)
pattern = 'abc1abc12'
text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
text2 = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(pattern , text1 ) and not rabin_karp(pattern , text2 )
# Test 2)
pattern = 'ABABX'
text = 'ABABZABABYABABX'
assert rabin_karp(pattern , text )
# Test 3)
pattern = 'AAAB'
text = 'ABAAAAAB'
assert rabin_karp(pattern , text )
# Test 4)
pattern = 'abcdabcy'
text = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(pattern , text )
# Test 5)
pattern = 'Lü'
text = 'Lüsai'
assert rabin_karp(pattern , text )
pattern = 'Lue'
assert not rabin_karp(pattern , text )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
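# The O(1) window update in the search loop is the classic rolling hash: with
# base a = alphabet_size and modulus m,
#   hash(text[i+1 : i+1+p]) = ((hash(text[i : i+p]) - ord(text[i]) * a**(p-1)) * a + ord(text[i+p])) mod m
# where modulus_power caches a**(p-1) mod m. Equal hashes are only a filter:
# the explicit text[i : i + p_len] == pattern comparison rules out collisions.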
| 371 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def test_custom_files_are_present (transformers_path ):
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
args = parser.parse_args()
if args.check_lib:
transformers_module = importlib.import_module("""transformers""")
transformers_path = Path(transformers_module.__file__).parent
else:
transformers_path = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
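# Typical invocations (mirroring the --check_lib flag above):
#   python <this script>               # verify build/lib/transformers contains the custom files
#   python <this script> --check_lib   # verify the installed transformers package instead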
| 25 | 0 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCAmelCase__ = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class lowercase_ ( PretrainedConfig ):
'''simple docstring'''
model_type = '''ernie_m'''
attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , vocab_size : int = 250_002 , hidden_size : int = 768 , num_hidden_layers : int = 12 , num_attention_heads : int = 12 , intermediate_size : int = 3_072 , hidden_act : str = "gelu" , hidden_dropout_prob : float = 0.1 , attention_probs_dropout_prob : float = 0.1 , max_position_embeddings : int = 514 , initializer_range : float = 0.02 , pad_token_id : int = 1 , layer_norm_eps : float = 1e-0_5 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs , ) ->str:
"""simple docstring"""
super().__init__(pad_token_id=pad_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.classifier_dropout = classifier_dropout
self.is_decoder = is_decoder
self.act_dropout = act_dropout
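# Note the attribute_map above: PretrainedConfig treats "dropout" and
# "num_classes" as accepted aliases, transparently stored as
# classifier_dropout / num_labels — e.g. a config built with num_classes=3
# reports num_labels == 3.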
| 26 |
def solution ( n :int = 600_851_475_143 ) -> int:
try:
n = int(n )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
i = 2
ans = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
ans = i
while n % i == 0:
n = n // i
i += 1
return int(ans )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
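# This module is import-compatibility glue only: everything above lives in
# `transformers.utils`, so both spellings keep working, e.g.
#   from transformers.file_utils import is_torch_available   # legacy path
#   from transformers.utils import is_torch_available        # preferred path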
| 26 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_s2s_model,
qa_s2s_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
UpperCAmelCase__ = "bart"
UpperCAmelCase__ = True
@st.cache(allow_output_mutation=True )
def load_models ( ):
if LOAD_DENSE_INDEX:
qar_tokenizer = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
qar_model = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
qar_model = qar_model.eval()
else:
qar_tokenizer , qar_model = (None, None)
if MODEL_TYPE == "bart":
sas_tokenizer = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
sas_model = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
save_dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
sas_model = sas_model.eval()
else:
sas_tokenizer , sas_model = make_qa_s2s_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes ( ):
if LOAD_DENSE_INDEX:
faiss_res = faiss.StandardGpuResources()
wikiaab_passages = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
wikiaab_passage_reps = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
wikiaab_index_flat = faiss.IndexFlatIP(128 )
wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wikiaab_index_flat )
wikiaab_gpu_index_flat.add(wikiaab_passage_reps ) # TODO fix for larger GPU
else:
wikiaab_passages , wikiaab_gpu_index_flat = (None, None)
es_client = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data ( ):
elia = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
elia_train = elia['''train_eli5''']
elia_train_q_reps = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
eli5_train_q_index = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(elia_train_q_reps )
return (elia_train, eli5_train_q_index)
wikiaab_passages , wikiaab_gpu_index_flat , es_client = load_indexes()
qar_tokenizer , qar_model , sas_tokenizer , sas_model = load_models()
elia_train , eli5_train_q_index = load_train_data()
def find_nearest_training (question , n_results=10 ):
q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
D , I = eli5_train_q_index.search(q_rep , n_results )
nn_examples = [elia_train[int(i )] for i in I[0]]
return nn_examples
def _a ( a :str , a :Any="wiki40b" , a :int="dense" , a :Union[str, Any]=10 ) -> List[str]:
if source == "none":
a , a = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
a , a = query_qa_dense_index(
a , a , a , a , a , a )
else:
a , a = query_es_index(
a , a , index_name='''english_wiki40b_snippets_100w''' , n_results=a , )
a = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
a = '''question: {} context: {}'''.format(a , a )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda a : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda a : None),
} )
def answer_question (question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=256 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ):
with torch.no_grad():
answer = qa_s2s_generate(
question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1_024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
UpperCAmelCase__ = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
UpperCAmelCase__ = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase__ = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
UpperCAmelCase__ = st.sidebar.checkbox("Demo options")
if demo_options:
UpperCAmelCase__ = st.sidebar.selectbox(
"",
action_list,
index=3,
)
UpperCAmelCase__ = action_list.index(action_st)
UpperCAmelCase__ = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
UpperCAmelCase__ = show_type == "Show full text of passages"
else:
UpperCAmelCase__ = 3
UpperCAmelCase__ = True
UpperCAmelCase__ = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
UpperCAmelCase__ = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
UpperCAmelCase__ = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
UpperCAmelCase__ = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
UpperCAmelCase__ = "wiki40b"
UpperCAmelCase__ = "dense"
UpperCAmelCase__ = "beam"
UpperCAmelCase__ = 2
UpperCAmelCase__ = 64
UpperCAmelCase__ = 256
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = st.sidebar.checkbox("Generation options")
if generate_options:
UpperCAmelCase__ = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
UpperCAmelCase__ = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
UpperCAmelCase__ = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCAmelCase__ = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase__ = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase__ = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase__ = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase__ = None
# start main text
questions_list = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCAmelCase__ = st.text_input("Enter your question here:", "")
else:
UpperCAmelCase__ = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method="dense", n_results=10)
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method="sparse", n_results=10)
UpperCAmelCase__ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase__ = support_list[:10]
UpperCAmelCase__ = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
answer , support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
UpperCAmelCase__ = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
UpperCAmelCase__ = res[1].strip()
if sec_titles == "":
UpperCAmelCase__ = "[{}]({})".format(res[0], wiki_url)
else:
UpperCAmelCase__ = sec_titles.split(" & ")
UpperCAmelCase__ = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
nn_train_list = find_nearest_training(question)
train_exple = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
answers_st = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
UpperCAmelCase__ = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
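# The dense retrieval path above reduces to max-inner-product search over
# 128-d embeddings. A minimal self-contained sketch of that core (synthetic
# vectors in place of the real passage representations):
#
#   import faiss, numpy as np
#   reps = np.random.rand(1000, 128).astype("float32")   # stand-in passage embeddings
#   index = faiss.IndexFlatIP(128)                       # exact inner-product index
#   index.add(reps)
#   query = np.random.rand(1, 128).astype("float32")     # stand-in question embedding
#   scores, ids = index.search(query, 10)                # top-10 passages by dot product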
| 26 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class lowercase_ ( PretrainedConfig ):
'''simple docstring'''
model_type = '''xmod'''
def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ):
"""simple docstring"""
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
self.pre_norm = pre_norm
self.adapter_reduction_factor = adapter_reduction_factor
self.adapter_layer_norm = adapter_layer_norm
self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
self.ln_before_adapter = ln_before_adapter
self.languages = list(languages )
self.default_language = default_language
class lowercase_ ( lowercase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self : Any ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
a = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
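# Added usage sketch (upstream class name: XmodConfig, shipped in
# transformers >= 4.26; names inferred, not taken from this file):
#
#   from transformers import XmodConfig
#   cfg = XmodConfig(default_language="en_XX")
#   cfg.model_type   # -> 'xmod', matching the model_type declared above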
| 26 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ = "▁"
UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = BertGenerationTokenizer
__snake_case = False
__snake_case = True
def __lowerCAmelCase ( self : str ) ->str:
"""simple docstring"""
super().setUp()
a = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
a = '''<s>'''
a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def __lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(__UpperCAmelCase ) , 1_002 )
def __lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def __lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
a = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
a = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
a = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
a = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def __lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
a = '''Hello World!'''
a = [18_536, 2_260, 101]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def __lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
a = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
a = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@require_torch
@slow
def __lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
a = list(self.big_tokenizer.get_vocab().keys() )[:10]
a = ''' '''.join(__UpperCAmelCase )
a = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors='''pt''' , return_token_type_ids=__UpperCAmelCase )
a = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=__UpperCAmelCase )
a = BertGenerationConfig()
a = BertGenerationEncoder(__UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCAmelCase )
model(**__UpperCAmelCase )
@slow
def __lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
a = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
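# Added standalone sketch mirroring the slow tests above (needs Hub access):
#
#   from transformers import BertGenerationTokenizer
#   tok = BertGenerationTokenizer.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder"
#   )
#   tok.encode("Hello World!")   # -> [18536, 2260, 101], as asserted above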
| 26 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
a = tempfile.mkdtemp()
a = BlipImageProcessor()
a = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
a = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
a = InstructBlipProcessor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : Optional[Any] , **__UpperCAmelCase : Tuple ) ->List[str]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer
def __lowerCAmelCase ( self : int , **__UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor
def __lowerCAmelCase ( self : Optional[Any] , **__UpperCAmelCase : Any ) ->Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).qformer_tokenizer
def __lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
a = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
a = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
a = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , __UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = self.prepare_image_inputs()
a = image_processor(__UpperCAmelCase , return_tensors='''np''' )
a = processor(images=__UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = processor(text=__UpperCAmelCase )
a = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
a = qformer_tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def __lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def __lowerCAmelCase ( self : Dict ) ->List[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__UpperCAmelCase )
a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) ->str:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
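# Added standalone sketch mirroring the tests above, written with the upstream
# class names (needs Hub access for the tiny-random checkpoints):
#
#   import numpy as np
#   from PIL import Image
#   from transformers import (BertTokenizerFast, BlipImageProcessor,
#                             GPT2Tokenizer, InstructBlipProcessor)
#
#   processor = InstructBlipProcessor(
#       BlipImageProcessor(),
#       GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model"),
#       BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert"),
#   )
#   image = Image.fromarray(np.random.randint(0, 255, (30, 400, 3), dtype=np.uint8))
#   out = processor(text="lower newer", images=image)
#   sorted(out.keys())  # input_ids, attention_mask, qformer_*, pixel_values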
| 26 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger("transformers.models.speecht5")
def _a ( a :Optional[Any] , a :Tuple , a :Dict ) -> List[str]:
hf_model.apply_weight_norm()
a = checkpoint['''input_conv.weight_g''']
a = checkpoint['''input_conv.weight_v''']
a = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
a = checkpoint[F"""upsamples.{i}.1.weight_g"""]
a = checkpoint[F"""upsamples.{i}.1.weight_v"""]
a = checkpoint[F"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
a = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
a = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
a = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
a = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
a = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
a = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]
a = checkpoint['''output_conv.1.weight_g''']
a = checkpoint['''output_conv.1.weight_v''']
a = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def _a ( a :List[str] , a :Union[str, Any] , a :Dict , a :Dict=None , a :List[Any]=None , ) -> int:
if config_path is not None:
a = SpeechTaHifiGanConfig.from_pretrained(a )
else:
a = SpeechTaHifiGanConfig()
a = SpeechTaHifiGan(a )
a = torch.load(a )
load_weights(orig_checkpoint['''model''']['''generator'''] , a , a )
a = np.load(a )
a = stats[0].reshape(-1 )
a = stats[1].reshape(-1 )
a = torch.from_numpy(a ).float()
a = torch.from_numpy(a ).float()
model.save_pretrained(a )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(a )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCAmelCase__ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
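    # Added note: stats.npy is expected to hold two rows (mean and scale) that
    # are reshaped and registered on the vocoder above. Programmatic equivalent
    # of this CLI (placeholder paths, not files that ship with the repo):
    #
    #   convert_hifigan_checkpoint(
    #       "generator.ckpt", "stats.npy", "./speecht5_hifigan"
    #   )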
| 26 | 1 |
def _a ( a :float , a :int ) -> float:
if digit_amount > 0:
return round(number - int(a ) , a )
return number - int(a )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 26 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase__ = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
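# Added usage note: because of the _LazyModule above, a line such as
# `from transformers import GPTBigCodeConfig` resolves from the import
# structure immediately, while GPTBigCodeModel is only materialized (and the
# torch-dependent modeling module imported) on first attribute access.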
| 26 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class lowercase_ :
'''simple docstring'''
def __init__( self : List[str] , __UpperCAmelCase : list[tuple[float, float]] ) ->Any:
"""simple docstring"""
a = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
a = len(__UpperCAmelCase ) - 1
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : float ) ->list[float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
a = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , __UpperCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(__UpperCAmelCase ) , 5 ) == 1
return output_values
def __lowerCAmelCase ( self : str , __UpperCAmelCase : float ) ->tuple[float, float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
a = self.basis_function(__UpperCAmelCase )
a = 0.0
a = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : float = 0.01 ) ->List[str]:
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
a = [] # x coordinates of points to plot
a = [] # y coordinates of points to plot
a = 0.0
while t <= 1:
a = self.bezier_curve_function(__UpperCAmelCase )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
a = [i[0] for i in self.list_of_points]
a = [i[1] for i in self.list_of_points]
plt.plot(
__UpperCAmelCase , __UpperCAmelCase , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
plt.scatter(__UpperCAmelCase , __UpperCAmelCase , color='''red''' , label='''Control Points''' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
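    # Added numeric check, kept as a sketch (method names as used at the call
    # sites above: basis_function / bezier_curve_function):
    #
    #   demo = BezierCurve([(1, 2), (3, 5)])
    #   demo.basis_function(0.5)         # -> [0.5, 0.5] for a degree-1 curve
    #   demo.bezier_curve_function(0.5)  # -> (2.0, 3.5), the segment midpoint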
| 26 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def _a ( a :Tuple ) -> int:
a = tmp_path / '''file.csv'''
a = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(a , '''w''' ) as f:
f.write(a )
return str(a )
@pytest.fixture
def _a ( a :int ) -> List[str]:
a = tmp_path / '''malformed_file.csv'''
a = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(a , '''w''' ) as f:
f.write(a )
return str(a )
@pytest.fixture
def _a ( a :Dict , a :int ) -> List[str]:
a = tmp_path / '''csv_with_image.csv'''
a = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(a , '''w''' ) as f:
f.write(a )
return str(a )
@pytest.fixture
def _a ( a :List[Any] ) -> Dict:
a = tmp_path / '''csv_with_label.csv'''
a = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(a , '''w''' ) as f:
f.write(a )
return str(a )
@pytest.fixture
def _a ( a :Tuple ) -> Any:
a = tmp_path / '''csv_with_int_list.csv'''
a = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(a , '''w''' ) as f:
f.write(a )
return str(a )
def _a ( a :Dict , a :int , a :Union[str, Any] ) -> List[Any]:
a = Csv()
a = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(a , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(a ) in record.message
for record in caplog.records )
@require_pil
def _a ( a :Dict ) -> Any:
with open(a , encoding='''utf-8''' ) as f:
a = f.read().splitlines()[1]
a = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
a = csv._generate_tables([[csv_file_with_image]] )
a = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
a = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def _a ( a :Any ) -> Tuple:
with open(a , encoding='''utf-8''' ) as f:
a = f.read().splitlines()[1:]
a = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
a = csv._generate_tables([[csv_file_with_label]] )
a = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
a = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label ) for label in labels]
def _a ( a :Union[str, Any] ) -> Optional[Any]:
    a = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x: [int(i ) for i in x.split()]} )
a = csv._generate_tables([[csv_file_with_int_list]] )
a = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
a = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
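# Added spot-check of the label mapping asserted in the test above
# (ClassLabel is already imported at the top of this file):
assert [ClassLabel(names=["good", "bad"]).str2int(label) for label in ["good", "bad", "good"]] == [0, 1, 0]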
| 26 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def _a ( a :List[str] ) -> List[str]:
if "cls_token" in name:
a = name.replace('''cls_token''' , '''vit.embeddings.cls_token''' )
if "mask_token" in name:
a = name.replace('''mask_token''' , '''decoder.mask_token''' )
if "decoder_pos_embed" in name:
a = name.replace('''decoder_pos_embed''' , '''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
a = name.replace('''pos_embed''' , '''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
a = name.replace('''patch_embed.proj''' , '''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
a = name.replace('''patch_embed.norm''' , '''vit.embeddings.norm''' )
if "decoder_blocks" in name:
a = name.replace('''decoder_blocks''' , '''decoder.decoder_layers''' )
if "blocks" in name:
a = name.replace('''blocks''' , '''vit.encoder.layer''' )
if "attn.proj" in name:
a = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
a = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
a = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
a = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
a = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
a = name.replace('''mlp.fc2''' , '''output.dense''' )
if "decoder_embed" in name:
a = name.replace('''decoder_embed''' , '''decoder.decoder_embed''' )
if "decoder_norm" in name:
a = name.replace('''decoder_norm''' , '''decoder.decoder_norm''' )
if "decoder_pred" in name:
a = name.replace('''decoder_pred''' , '''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
a = name.replace('''norm.weight''' , '''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
a = name.replace('''norm.bias''' , '''vit.layernorm.bias''' )
return name
def _a ( a :List[Any] , a :Dict ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
a = orig_state_dict.pop(a )
if "qkv" in key:
a = key.split('''.''' )
a = int(key_split[1] )
if "decoder_blocks" in key:
a = config.decoder_hidden_size
a = '''decoder.decoder_layers.'''
if "weight" in key:
a = val[:dim, :]
a = val[dim : dim * 2, :]
a = val[-dim:, :]
elif "bias" in key:
a = val[:dim]
a = val[dim : dim * 2]
a = val[-dim:]
else:
a = config.hidden_size
a = '''vit.encoder.layer.'''
if "weight" in key:
a = val[:dim, :]
a = val[dim : dim * 2, :]
a = val[-dim:, :]
elif "bias" in key:
a = val[:dim]
a = val[dim : dim * 2]
a = val[-dim:]
else:
a = val
return orig_state_dict
def _a ( a :Dict , a :int ) -> List[Any]:
a = ViTMAEConfig()
if "large" in checkpoint_url:
a = 1_024
a = 4_096
a = 24
a = 16
elif "huge" in checkpoint_url:
a = 14
a = 1_280
a = 5_120
a = 32
a = 16
a = ViTMAEForPreTraining(a )
a = torch.hub.load_state_dict_from_url(a , map_location='''cpu''' )['''model''']
a = ViTMAEImageProcessor(size=config.image_size )
a = convert_state_dict(a , a )
model.load_state_dict(a )
model.eval()
a = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
a = Image.open(requests.get(a , stream=a ).raw )
a = ViTMAEImageProcessor(size=config.image_size )
a = image_processor(images=a , return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
a = model(**a )
a = outputs.logits
if "large" in checkpoint_url:
a = torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
a = torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
a = torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , a , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(a )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase__ = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
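    # Added spot-check of the renaming rules in the first helper above
    # (upstream name: rename_key); the checkpoint key is hypothetical:
    probe = "blocks.0.attn.proj.weight"
    probe = probe.replace("blocks", "vit.encoder.layer").replace("attn.proj", "attention.output.dense")
    assert probe == "vit.encoder.layer.0.attention.output.dense.weight"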
| 26 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = torch.device("cpu")
def _a ( ) -> Union[str, Any]:
a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a = Image.open(requests.get(a , stream=a ).raw )
return im
def _a ( a :Dict ) -> Tuple:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def _a ( a :int , a :Any , a :Union[str, Any] ) -> int:
a = dct.pop(a )
a = val
def _a ( a :Any ) -> Dict:
a = []
for k in state_dict.keys():
a = k
if ".pwconv" in k:
a = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
if ".dwconv" in k:
a = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
if ".Proj." in k:
a = k_new.replace('''.Proj.''' , '''.proj.''' )
if "patch_embed" in k_new:
a = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
if "network" in k_new:
a = k_new.split('''.''' )
if ls[2].isdigit():
a = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
else:
a = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def _a ( a :List[Any] , a :Tuple , a :List[str] ) -> Union[str, Any]:
a = SwiftFormerConfig()
    # ImageNet-1k label mappings (id2label / label2id) for the classification head
a = 1_000
a = '''huggingface/label-files'''
a = '''imagenet-1k-id2label.json'''
a = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) )
a = {int(a ): v for k, v in idalabel.items()}
a = idalabel
a = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a = [3, 3, 6, 4]
a = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
a = [3, 3, 9, 6]
a = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
a = [4, 3, 10, 5]
a = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
a = [4, 4, 12, 6]
a = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('''https''' ):
a = torch.hub.load_state_dict_from_url(a , map_location='''cpu''' , check_hash=a )
else:
a = torch.load(a , map_location='''cpu''' )
a = checkpoint
a = create_rename_keys(a )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(a , a , a )
# load HuggingFace model
a = SwiftFormerForImageClassification(a ).eval()
hf_model.load_state_dict(a )
# prepare test inputs
a = prepare_img()
a = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
a = processor(images=a , return_tensors='''pt''' )
# compare outputs from both models
a = get_expected_output(a )
a = hf_model(inputs['''pixel_values'''] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , a , atol=1e-3 )
Path(a ).mkdir(exist_ok=a )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
UpperCAmelCase__ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
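    # Added spot-check of the key-renaming scheme above on one hypothetical
    # checkpoint key: ".pwconv" -> ".point_wise_conv", then the stage/block
    # indices are folded into "swiftformer.encoder.network.<s>.blocks.<b>.":
    probe = "network.0.1.pwconv1.weight".replace(".pwconv", ".point_wise_conv")
    parts = probe.split(".")
    assert (
        "swiftformer.encoder.network." + parts[1] + ".blocks." + parts[2] + "." + ".".join(parts[3:])
        == "swiftformer.encoder.network.0.blocks.1.point_wise_conv1.weight"
    )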
| 26 | 1 |
from collections import namedtuple
UpperCAmelCase__ = namedtuple("from_to", "from_ to")
UpperCAmelCase__ = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 1000),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.0_0454, 264.172),
"cubicyard": from_to(0.7_6455, 1.3_0795),
"cubicfoot": from_to(0.028, 35.3147),
"cup": from_to(0.0_0023_6588, 4226.75),
}
def _a ( a :float , a :str , a :str ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
            F"""Invalid 'from_type' value: {from_type!r}. Supported values are:\n"""
+ ''', '''.join(a ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ ''', '''.join(a ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
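    # Added spot-check (upstream helper name: volume_conversion):
    #   volume_conversion(2, "cubicmeter", "litre")  # -> 2000
    # i.e. multiply by from_=1 for cubicmeter, then by to=1000 for litre.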
| 26 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __UpperCAmelCase : UNetaDModel , __UpperCAmelCase : UNetaDModel , __UpperCAmelCase : DDPMScheduler , __UpperCAmelCase : Optional[int] , ) ->List[str]:
"""simple docstring"""
super().__init__()
a = value_function
a = unet
a = scheduler
a = env
a = env.get_dataset()
a = {}
for key in self.data.keys():
try:
a = self.data[key].mean()
except: # noqa: E722
pass
a = {}
for key in self.data.keys():
try:
a = self.data[key].std()
except: # noqa: E722
pass
a = env.observation_space.shape[0]
a = env.action_space.shape[0]
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ) ->Dict:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) ->List[str]:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def __lowerCAmelCase ( self : int , __UpperCAmelCase : int ) ->List[str]:
"""simple docstring"""
if type(__UpperCAmelCase ) is dict:
return {k: self.to_torch(__UpperCAmelCase ) for k, v in x_in.items()}
elif torch.is_tensor(__UpperCAmelCase ):
return x_in.to(self.unet.device )
return torch.tensor(__UpperCAmelCase , device=self.unet.device )
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple ) ->int:
"""simple docstring"""
for key, val in cond.items():
a = val.clone()
return x_in
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = x.shape[0]
a = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
a = torch.full((batch_size,) , __UpperCAmelCase , device=self.unet.device , dtype=torch.long )
for _ in range(__UpperCAmelCase ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
a = self.value_function(x.permute(0 , 2 , 1 ) , __UpperCAmelCase ).sample
a = torch.autograd.grad([y.sum()] , [x] )[0]
a = self.scheduler._get_variance(__UpperCAmelCase )
a = torch.exp(0.5 * posterior_variance )
a = model_std * grad
a = 0
a = x.detach()
a = x + scale * grad
a = self.reset_xa(__UpperCAmelCase , __UpperCAmelCase , self.action_dim )
a = self.unet(x.permute(0 , 2 , 1 ) , __UpperCAmelCase ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
a = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , predict_epsilon=__UpperCAmelCase )['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
a = self.reset_xa(__UpperCAmelCase , __UpperCAmelCase , self.action_dim )
a = self.to_torch(__UpperCAmelCase )
return x, y
def __call__( self : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int]=64 , __UpperCAmelCase : int=32 , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : str=0.1 ) ->List[str]:
"""simple docstring"""
a = self.normalize(__UpperCAmelCase , '''observations''' )
a = obs[None].repeat(__UpperCAmelCase , axis=0 )
a = {0: self.to_torch(__UpperCAmelCase )}
a = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
a = randn_tensor(__UpperCAmelCase , device=self.unet.device )
a = self.reset_xa(__UpperCAmelCase , __UpperCAmelCase , self.action_dim )
a = self.to_torch(__UpperCAmelCase )
# run the diffusion process
a , a = self.run_diffusion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# sort output trajectories by value
a = y.argsort(0 , descending=__UpperCAmelCase ).squeeze()
a = x[sorted_idx]
a = sorted_values[:, :, : self.action_dim]
a = actions.detach().cpu().numpy()
a = self.de_normalize(__UpperCAmelCase , key='''actions''' )
# select the action with the highest value
if y is not None:
a = 0
else:
# if we didn't run value guiding, select a random action
a = np.random.randint(0 , __UpperCAmelCase )
a = denorm_actions[selected_index, 0]
return denorm_actions
| 26 | 1 |
from ..utils import DummyObject, requires_backends
class lowercase_ ( metaclass=lowercase ):
'''simple docstring'''
__snake_case = ['''note_seq''']
def __init__( self : int , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def __lowerCAmelCase ( cls : Optional[int] , *__UpperCAmelCase : Dict , **__UpperCAmelCase : Optional[int] ) ->List[Any]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def __lowerCAmelCase ( cls : int , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : Dict ) ->List[str]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
| 26 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model"}
UpperCAmelCase__ = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : List[str]="<s>" , __UpperCAmelCase : int="</s>" , __UpperCAmelCase : Any="<unk>" , __UpperCAmelCase : Optional[Any]="<sep>" , __UpperCAmelCase : int="<pad>" , __UpperCAmelCase : Any="<cls>" , __UpperCAmelCase : List[str]="<mask>" , __UpperCAmelCase : Optional[int]=["<eop>", "<eod>"] , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : Union[str, Any] , ) ->None:
"""simple docstring"""
a = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
a = 3
a = do_lower_case
a = remove_space
a = keep_accents
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
a = jieba
a = str.maketrans(''' \n''' , '''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
return len(self.sp_model )
def __lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
a = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : List[str] , __UpperCAmelCase : Optional[int] ) ->str:
"""simple docstring"""
a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[str] ) ->List[str]:
"""simple docstring"""
if self.remove_space:
a = ''' '''.join(inputs.strip().split() )
else:
a = inputs
a = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
a = unicodedata.normalize('''NFKD''' , __UpperCAmelCase )
a = ''''''.join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
a = outputs.lower()
return outputs
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
a = self.preprocess_text(__UpperCAmelCase )
a = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
a = []
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
a = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
a = cur_pieces[1:]
else:
a = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Any ) ->Any:
"""simple docstring"""
return self.sp_model.PieceToId(__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Dict ) ->Union[str, Any]:
"""simple docstring"""
return self.sp_model.IdToPiece(__UpperCAmelCase )
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
a = ''''''.join(__UpperCAmelCase ).replace(__UpperCAmelCase , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
a = [self.sep_token_id]
a = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
def __lowerCAmelCase ( self : Any , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = super()._decode(*__UpperCAmelCase , **__UpperCAmelCase )
a = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
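# Added round-trip sketch (upstream class name: CpmTokenizer; requires jieba,
# sentencepiece, and Hub access). The translator built in __init__ maps
# " " -> \u2582 and "\n" -> \u2583 before tokenization, and _decode above maps
# them back:
#
#   tok = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tok.encode("你好 世界")
#   tok.decode(ids)   # the \u2582 placeholder is restored to a plain space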
| 26 | 1 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Dict ) ->List[Any]:
"""simple docstring"""
a = 0
@slow
def __lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
a = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__UpperCAmelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
a = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__UpperCAmelCase ) , 0 )
def __lowerCAmelCase ( self : Union[str, Any] ) ->List[str]:
"""simple docstring"""
a = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def __lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
a = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def __lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
a = AutoConfig.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
# Check that tokenizer_type ≠ model_type
a = AutoTokenizer.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def __lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__UpperCAmelCase , '''vocab.txt''' ) )
a = AutoTokenizer.from_pretrained(__UpperCAmelCase , tokenizer_type='''bert''' , use_fast=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__UpperCAmelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__UpperCAmelCase , '''merges.txt''' ) )
a = AutoTokenizer.from_pretrained(__UpperCAmelCase , tokenizer_type='''gpt2''' , use_fast=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@require_tokenizers
def __lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(__UpperCAmelCase , '''vocab.txt''' ) )
a = AutoTokenizer.from_pretrained(__UpperCAmelCase , tokenizer_type='''bert''' )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(__UpperCAmelCase , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(__UpperCAmelCase , '''merges.txt''' ) )
a = AutoTokenizer.from_pretrained(__UpperCAmelCase , tokenizer_type='''gpt2''' )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Any ) ->Optional[int]:
"""simple docstring"""
with pytest.raises(__UpperCAmelCase ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def __lowerCAmelCase ( self : List[Any] ) ->List[Any]:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
a = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(__UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __UpperCAmelCase )
else:
self.assertEqual(tokenizer.do_lower_case , __UpperCAmelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def __lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__UpperCAmelCase , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
a = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def __lowerCAmelCase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
a = TOKENIZER_MAPPING.values()
a = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__UpperCAmelCase )
@require_tokenizers
def __lowerCAmelCase ( self : Dict ) ->List[Any]:
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=__UpperCAmelCase ) , __UpperCAmelCase )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , __UpperCAmelCase )
@require_tokenizers
def __lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
a = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=__UpperCAmelCase )
a = '''Hello, world. How are you?'''
a = tokenizer.tokenize(__UpperCAmelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
a = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=__UpperCAmelCase )
a = tokenizer.tokenize(__UpperCAmelCase )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def __lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
a = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30_000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def __lowerCAmelCase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
a = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
a = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def __lowerCAmelCase ( self : List[Any] ) ->int:
"""simple docstring"""
a = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
a = get_tokenizer_config('''bert-base-cased''' )
a = config.pop('''_commit_hash''' , __UpperCAmelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__UpperCAmelCase , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
a = get_tokenizer_config(__UpperCAmelCase )
self.assertDictEqual(__UpperCAmelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
a = AutoTokenizer.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
a = get_tokenizer_config(__UpperCAmelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def __lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , __UpperCAmelCase )
AutoTokenizer.register(__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCAmelCase ):
AutoTokenizer.register(__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase )
a = CustomTokenizer.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
a = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , __UpperCAmelCase )
# Can register in two steps
AutoTokenizer.register(__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__UpperCAmelCase , fast_tokenizer_class=__UpperCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase , fast_tokenizer_class=__UpperCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCAmelCase ):
AutoTokenizer.register(__UpperCAmelCase , fast_tokenizer_class=__UpperCAmelCase )
            # We pass through a BertTokenizerFast because there is no slow-to-fast converter for our new tokenizer
            # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
a = BertTokenizerFast.from_pretrained(__UpperCAmelCase )
bert_tokenizer.save_pretrained(__UpperCAmelCase )
a = CustomTokenizerFast.from_pretrained(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
a = AutoTokenizer.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
a = AutoTokenizer.from_pretrained(__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
with self.assertRaises(__UpperCAmelCase ):
a = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCAmelCase ):
a = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__UpperCAmelCase )
a = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
a = AutoTokenizer.from_pretrained(__UpperCAmelCase , trust_remote_code=__UpperCAmelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
a = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__UpperCAmelCase )
a = AutoTokenizer.from_pretrained(__UpperCAmelCase , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = False
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = NewTokenizer
__snake_case = False
try:
AutoConfig.register('''custom''' , __UpperCAmelCase )
AutoTokenizer.register(__UpperCAmelCase , slow_tokenizer_class=__UpperCAmelCase )
AutoTokenizer.register(__UpperCAmelCase , fast_tokenizer_class=__UpperCAmelCase )
            # If remote code is not set, the default is to use the locally registered classes.
a = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
a = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=__UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
a = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
a = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
a = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
a = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
a = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
a = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=__UpperCAmelCase , use_fast=__UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __lowerCAmelCase ( self : Any ) ->Any:
"""simple docstring"""
with self.assertRaisesRegex(
__UpperCAmelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
a = AutoTokenizer.from_pretrained('''bert-base''' )
def __lowerCAmelCase ( self : str ) ->str:
"""simple docstring"""
with self.assertRaisesRegex(
__UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
a = AutoTokenizer.from_pretrained(__UpperCAmelCase , revision='''aaaaaa''' )
def __lowerCAmelCase ( self : str ) ->Union[str, Any]:
"""simple docstring"""
a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
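# The registration flow exercised by the tests above, as it would look in user code.
# Minimal sketch: CustomConfig / CustomTokenizer stand in for user-defined classes and
# "path/to/checkpoint" is a hypothetical directory.
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#   tokenizer = AutoTokenizer.from_pretrained("path/to/checkpoint")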
| 26 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    # Only supports the v1 architecture
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path where the converted diffusers VAE will be saved.")
UpperCAmelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
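# Example invocation (both paths are hypothetical, shown only for illustration):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers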
| 26 | 1 |
UpperCAmelCase__ = "\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the latest release, comment out the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
UpperCAmelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
UpperCAmelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 26 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = ['''image_processor''', '''tokenizer''']
__snake_case = '''CLIPImageProcessor'''
__snake_case = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Dict , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : Optional[Any] ) ->List[str]:
"""simple docstring"""
a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __UpperCAmelCase , )
a = kwargs.pop('''feature_extractor''' )
a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self : List[str] , __UpperCAmelCase : Any=None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Any=None , **__UpperCAmelCase : str ) ->Optional[Any]:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
a = self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if images is not None:
a = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
a = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple , *__UpperCAmelCase : str , **__UpperCAmelCase : Tuple ) ->Any:
"""simple docstring"""
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def __lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
a = self.tokenizer.model_input_names
a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __UpperCAmelCase , )
return self.image_processor_class
@property
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __UpperCAmelCase , )
return self.image_processor
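# Minimal usage sketch of the processor defined above (published in transformers as
# CLIPProcessor); the checkpoint name and image path are illustrative assumptions.
#
#   from PIL import Image
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
#   print(inputs.keys())  # input_ids, attention_mask, pixel_values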
| 26 | 1 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
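    # Interleaves the two strings and appends the longer one's tail -> prints "AXBYZ"
    assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"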
| 26 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
UpperCAmelCase__ = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = PRETRAINED_INIT_CONFIGURATION
__snake_case = ['''input_ids''', '''attention_mask''']
__snake_case = DistilBertTokenizer
def __init__( self : Dict , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[int]="[UNK]" , __UpperCAmelCase : str="[SEP]" , __UpperCAmelCase : Tuple="[PAD]" , __UpperCAmelCase : Any="[CLS]" , __UpperCAmelCase : int="[MASK]" , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : str , ) ->Optional[int]:
"""simple docstring"""
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , )
a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __UpperCAmelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __UpperCAmelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __UpperCAmelCase ) != tokenize_chinese_chars
):
a = getattr(__UpperCAmelCase , normalizer_state.pop('''type''' ) )
a = do_lower_case
a = strip_accents
a = tokenize_chinese_chars
a = normalizer_class(**__UpperCAmelCase )
a = do_lower_case
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int]=None ) ->Optional[Any]:
"""simple docstring"""
a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
a = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
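# Minimal usage sketch of the fast tokenizer defined above (published in transformers as
# DistilBertTokenizerFast); the checkpoint name assumes network access and is illustrative.
#
#   from transformers import DistilBertTokenizerFast
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   encoded = tokenizer("Hello world", return_tensors="np")
#   print(encoded["input_ids"])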
| 26 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = ['''image_processor''', '''tokenizer''']
__snake_case = '''BlipImageProcessor'''
__snake_case = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] ) ->List[Any]:
"""simple docstring"""
a = False
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
a = self.image_processor
def __call__( self : List[str] , __UpperCAmelCase : ImageInput = None , __UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , __UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : int = 0 , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , **__UpperCAmelCase : Dict , ) ->BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
a = self.tokenizer
a = self.tokenizer(
text=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
return text_encoding
# add pixel_values
a = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase )
if text is not None:
a = self.tokenizer(
text=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
else:
a = None
if text_encoding is not None:
encoding_image_processor.update(__UpperCAmelCase )
return encoding_image_processor
def __lowerCAmelCase ( self : Union[str, Any] , *__UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : List[Any] ) ->Tuple:
"""simple docstring"""
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCAmelCase ( self : Any , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Dict ) ->List[Any]:
"""simple docstring"""
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def __lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
a = self.tokenizer.model_input_names
a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
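# Minimal usage sketch of the processor defined above (published in transformers as
# BlipProcessor); the checkpoint name and image path are illustrative assumptions.
#
#   from PIL import Image
#   from transformers import BlipProcessor
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=Image.open("photo.jpg"), text="a photography of", return_tensors="pt")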
| 26 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1_000) -> int:
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
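    # Sanity check: up to perimeter 12 the only right triangle is 3-4-5, so 12 wins.
    assert solution(12) == 12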
| 26 | 1 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""")
        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"""=> File names {file_names}""")
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"""=> removing {file_name}""")
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
UpperCAmelCase__ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
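# Example invocation (the dump path is a hypothetical directory):
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased --dump_path ./fast_tokenizers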
| 26 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = {start}, [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
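    # Every vertex is reachable from "A", so the whole vertex set is explored.
    assert depth_first_search(G, "A") == set(G)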
| 26 | 1 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase__ = 16
UpperCAmelCase__ = 32
def _a ( a :Dict ) -> Dict:
return int(x / 2**20 )
class lowercase_ :
'''simple docstring'''
def __enter__( self : List[str] ) ->Optional[int]:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
a = torch.cuda.memory_allocated()
return self
def __exit__( self : Optional[Any] , *__UpperCAmelCase : str ) ->int:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
a = torch.cuda.memory_allocated()
a = torch.cuda.max_memory_allocated()
a = bamb(self.end - self.begin )
a = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def _a ( a :Accelerator , a :int = 16 , a :str = "bert-base-cased" , a :int = 320 , a :int = 160 , ) -> Optional[int]:
a = AutoTokenizer.from_pretrained(a )
a = load_dataset(
'''glue''' , '''mrpc''' , split={'''train''': F"""train[:{n_train}]""", '''validation''': F"""validation[:{n_val}]"""} )
def tokenize_function(a :int ):
# max_length=None => use the model max length (it's actually the default)
a = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=a , max_length=a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
a = datasets.map(
a , batched=a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=a )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
a = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(a :str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(a , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(a , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
a = DataLoader(
tokenized_datasets['''train'''] , shuffle=a , collate_fn=a , batch_size=a )
a = DataLoader(
tokenized_datasets['''validation'''] , shuffle=a , collate_fn=a , batch_size=a )
return train_dataloader, eval_dataloader
def _a ( a :Any , a :str ) -> int:
# Initialize accelerator
a = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config['''lr''']
a = int(config['''num_epochs'''] )
a = int(config['''seed'''] )
a = int(config['''batch_size'''] )
a = args.model_name_or_path
set_seed(a )
a , a = get_dataloaders(a , a , a , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = AutoModelForSequenceClassification.from_pretrained(a , return_dict=a )
# Instantiate optimizer
a = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
a = optimizer_cls(params=model.parameters() , lr=a )
if accelerator.state.deepspeed_plugin is not None:
a = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
a = 1
a = (len(a ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
a = get_linear_schedule_with_warmup(
optimizer=a , num_warmup_steps=0 , num_training_steps=a , )
else:
a = DummyScheduler(a , total_num_steps=a , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
a , a , a , a , a )
# We need to keep track of how many total steps we have iterated over
a = 0
# We also need to keep track of the stating epoch so files are named properly
a = 0
# Now we train the model
a = {}
for epoch in range(a , a ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(a ):
a = model(**a )
a = outputs.loss
a = loss / gradient_accumulation_steps
accelerator.backward(a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) )
accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
accelerator.print(
'''Total Peak Memory consumed during the train (max): {}'''.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
a = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f:
json.dump(a , a )
def _a ( ) -> List[Any]:
a = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=a , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=a , )
parser.add_argument(
'''--output_dir''' , type=a , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--peak_memory_upper_bound''' , type=a , default=a , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , )
parser.add_argument(
'''--n_train''' , type=a , default=320 , help='''Number of training examples to use.''' , )
parser.add_argument(
'''--n_val''' , type=a , default=160 , help='''Number of validation examples to use.''' , )
parser.add_argument(
'''--num_epochs''' , type=a , default=1 , help='''Number of train epochs.''' , )
a = parser.parse_args()
a = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(a , a )
if __name__ == "__main__":
main()
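# Example launch (assumes `accelerate config` was run beforehand; the script file name is
# an illustrative assumption):
#   accelerate launch peak_memory_tracking.py --model_name_or_path bert-base-cased --num_epochs 1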
| 26 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
UpperCAmelCase__ = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
UpperCAmelCase__ = 10
UpperCAmelCase__ = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"""Duplicate key {code_key}""")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"""Original dataset size: {len(dataset )}""")
    print(f"""Number of duplicate clusters: {len(duplicate_clusters )}""")
    print(f"""Files in duplicate cluster: {len(duplicate_indices )}""")
    print(f"""Unique files in duplicate cluster: {len(extreme_dict )}""")
    print(f"""Filtered dataset size: {len(ds_filter )}""")
    return ds_filter, duplicate_clusters
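# Minimal usage sketch of deduplicate_dataset above; the dataset name is an illustrative
# assumption, and the input must provide "content", "repo_name" and "path" columns.
#
#   from datasets import load_dataset
#   ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#   ds_dedup, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
#   print(len(ds), "->", len(ds_dedup))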
| 26 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple=7 , __UpperCAmelCase : str=3 , __UpperCAmelCase : Union[str, Any]=30 , __UpperCAmelCase : Optional[Any]=400 , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : int=None , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Optional[int]=1 / 255 , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Any=[0.5, 0.5, 0.5] , __UpperCAmelCase : Optional[int]=[0.5, 0.5, 0.5] , __UpperCAmelCase : Any=True , ) ->Dict:
"""simple docstring"""
a = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
a = parent
a = batch_size
a = num_channels
a = min_resolution
a = max_resolution
a = do_resize
a = size
a = do_rescale
a = rescale_factor
a = do_normalize
a = image_mean
a = image_std
a = do_pad
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCAmelCase ( self : int , __UpperCAmelCase : Any , __UpperCAmelCase : Any=False ) ->int:
"""simple docstring"""
if not batched:
a = image_inputs[0]
if isinstance(__UpperCAmelCase , Image.Image ):
a , a = image.size
else:
a , a = image.shape[1], image.shape[2]
if w < h:
a = int(self.size['''shortest_edge'''] * h / w )
a = self.size['''shortest_edge''']
elif w > h:
a = self.size['''shortest_edge''']
a = int(self.size['''shortest_edge'''] * w / h )
else:
a = self.size['''shortest_edge''']
a = self.size['''shortest_edge''']
else:
a = []
for image in image_inputs:
a , a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[0] )[0]
a = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = DetrImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
a = DetrImageProcessingTester(self )
@property
def __lowerCAmelCase ( self : Union[str, Any] ) ->Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_rescale''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''rescale_factor''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''size''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_pad''' ) )
def __lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
a = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCAmelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Dict:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : List[Any] ) ->Any:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a , a = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self : Union[str, Any] ) ->List[str]:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
a , a = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
a = json.loads(f.read() )
a = {'''image_id''': 39_769, '''annotations''': target}
# encode them
a = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
a = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
a = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , __UpperCAmelCase )
a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __UpperCAmelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __UpperCAmelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __UpperCAmelCase )
a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __UpperCAmelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __UpperCAmelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __UpperCAmelCase ) )
# verify class_labels
a = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __UpperCAmelCase ) )
# verify orig_size
a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __UpperCAmelCase ) )
# verify size
a = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __UpperCAmelCase ) )
@slow
def __lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
a = json.loads(f.read() )
a = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
a = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
a = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
a = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
a = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape , __UpperCAmelCase )
a = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __UpperCAmelCase , atol=1e-4 ) )
# verify area
a = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __UpperCAmelCase ) )
# verify boxes
a = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __UpperCAmelCase )
a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __UpperCAmelCase , atol=1e-3 ) )
# verify image_id
a = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __UpperCAmelCase ) )
# verify is_crowd
a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __UpperCAmelCase ) )
# verify class_labels
a = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __UpperCAmelCase ) )
# verify masks
a = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __UpperCAmelCase )
# verify orig_size
a = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __UpperCAmelCase ) )
# verify size
a = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __UpperCAmelCase ) )
| 26 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
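    # Worked check: a 3x3 lamina with a 1x1 hole uses 9 - 1 = 8 tiles and is the only
    # lamina possible with at most 8 tiles (an 8x8 outer with a 4x4 hole needs 48).
    assert solution(8) == 1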
| 26 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def _a ( a :Union[str, Any] ) -> Any:
a = OrderedDict()
for key, value in state_dict.items():
if key.startswith('''module.encoder''' ):
a = key.replace('''module.encoder''' , '''glpn.encoder''' )
if key.startswith('''module.decoder''' ):
a = key.replace('''module.decoder''' , '''decoder.stages''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
a = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
a = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(a )-1}""" )
if "norm" in key:
a = key.replace('''norm''' , '''layer_norm''' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
a = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )]
a = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(a )-1}""" )
if "layer_norm1" in key:
a = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
a = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
a = key[key.find('''block''' ) + len('''block''' )]
a = key.replace(F"""block{idx}""" , F"""block.{int(a )-1}""" )
if "attn.q" in key:
a = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
a = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
a = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
a = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
a = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
a = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
a = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
a = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
a = key[key.find('''linear_c''' ) + len('''linear_c''' )]
a = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(a )-1}""" )
if "bot_conv" in key:
a = key.replace('''bot_conv''' , '''0.convolution''' )
if "skip_conv1" in key:
a = key.replace('''skip_conv1''' , '''1.convolution''' )
if "skip_conv2" in key:
a = key.replace('''skip_conv2''' , '''2.convolution''' )
if "fusion1" in key:
a = key.replace('''fusion1''' , '''1.fusion''' )
if "fusion2" in key:
a = key.replace('''fusion2''' , '''2.fusion''' )
if "fusion3" in key:
a = key.replace('''fusion3''' , '''3.fusion''' )
if "fusion" in key and "conv" in key:
a = key.replace('''conv''' , '''convolutional_layer''' )
if key.startswith('''module.last_layer_depth''' ):
a = key.replace('''module.last_layer_depth''' , '''head.head''' )
a = value
return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    # GLPN configuration (Segformer-B4 size)
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 26 |
UpperCAmelCase__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
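# Editor's sketch (hypothetical usage, not part of this __init__): `find_executable_batch_size`,
# re-exported above, retries a training function with a smaller batch size whenever it hits an
# out-of-memory error. The training-loop body below is illustrative only.
#
# from accelerate import find_executable_batch_size
#
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size):
#     ...  # build dataloaders with `batch_size` and run the loop
#
# train()  # called with no arguments; the decorator injects `batch_size`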
| 26 | 1 |
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
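    # Editor's spot-check (sketch): base-26 with A=1, so "AB" -> 1 * 26 + 2 = 28.
    assert excel_title_to_column("AB") == 28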
| 26 |
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
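    # Editor's spot-check (sketch): gnome sort returns the list in ascending order.
    assert gnome_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]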
| 26 | 1 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
)
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
"7B": 11008,
"13B": 13824,
"30B": 17920,
"65B": 22016,
"70B": 28672,
}
NUM_SHARDS = {
"7B": 1,
"7Bf": 1,
"13B": 2,
"13Bf": 2,
"30B": 4,
"65B": 8,
"70B": 8,
"70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
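# Editor's spot-check (sketch): with default multipliers, a 7B-style hidden size of 4096
# maps to the 11008 value listed in the size map above:
# int(8 * 4096 / 3) = 10922, rounded up to the next multiple of 256 -> 11008.
assert compute_intermediate_size(4096) == 11008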
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)
def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(F"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
a = torch.load(os.path.join(a , '''consolidated.00.pth''' ) , map_location='''cpu''' )
else:
# Sharded
a = [
torch.load(os.path.join(a , F"""consolidated.{i:02d}.pth""" ) , map_location='''cpu''' )
for i in range(a )
]
a = 0
a = {'''weight_map''': {}}
for layer_i in range(a ):
a = F"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
            state_dict = {
F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wq.weight"""] ),
F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wk.weight"""] ),
F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""],
F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""],
F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""],
F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""],
F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""],
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""],
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.attention_norm.weight"""
].clone(),
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )
        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
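# Editor's note (sketch): a typical invocation, assuming the downloaded LLaMA layout
# described above (all paths are illustrative):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama --model_size 7B --output_dir /output/path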
| 26 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
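# Editor's note (design sketch): the `_LazyModule` indirection means a statement like
# `from transformers.models.deberta import DebertaModel` only imports `modeling_deberta`
# (and its torch dependency) on first attribute access, keeping a bare
# `import transformers` cheap.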
| 26 | 1 |
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
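    # Editor's spot-check (sketch): C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375
    assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12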
| 26 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
'''simple docstring'''
    def __init__(self):
"""simple docstring"""
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
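# Editor's sketch (hypothetical usage; the checkpoint name below is illustrative):
# `AutoFeatureExtractor.from_pretrained` resolves the concrete class through the
# FEATURE_EXTRACTOR_MAPPING_NAMES table above (here, wav2vec2 -> Wav2Vec2FeatureExtractor).
#
# from transformers import AutoFeatureExtractor
# extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")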
| 26 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
UpperCAmelCase__ = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
'''simple docstring'''
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]
class ShapEImg2ImgPipeline(DiffusionPipeline):
'''simple docstring'''
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self : Optional[Any] , __UpperCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 25 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : float = 4.0 , __UpperCAmelCase : int = 64 , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , ) ->Dict:
"""simple docstring"""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4_096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 26 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
'''simple docstring'''
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer
    def tearDown(self):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
"""simple docstring"""
a = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
a = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
a = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , __UpperCAmelCase )
    def test_image_processor(self):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = self.prepare_image_inputs()
a = image_processor(__UpperCAmelCase , return_tensors='''np''' )
a = processor(images=__UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer(self):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = processor(text=__UpperCAmelCase )
a = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
a = qformer_tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
    def test_processor(self):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
    def test_tokenizer_decode(self):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__UpperCAmelCase )
a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
    def test_model_input_names(self):
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 26 | 1 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 26 |
import math
def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
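# Editor's spot-check (sketch): for the block matrix M = [[A, B], [B^T, C]],
# det(M) = det(A) * det(C - B^T A^{-1} B). With A = 2*I_2, B = [[1], [1]], C = [[3]],
# the Schur complement is 3 - (0.5 + 0.5) = [[2.0]].
assert np.allclose(
    schur_complement(2 * np.eye(2), np.array([[1.0], [1.0]]), np.array([[3.0]])), np.array([[2.0]])
)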
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 26 |
def solution(n: int = 600_851_475_143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1

    return int(ans)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
UpperCAmelCase__ = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
"""simple docstring"""
a = {'''input_ids''': [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
a = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=__UpperCAmelCase , )
| 26 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
UpperCAmelCase__ = "bart"
UpperCAmelCase__ = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def _a ( a :str , a :Any="wiki40b" , a :int="dense" , a :Union[str, Any]=10 ) -> List[str]:
if source == "none":
a , a = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
a , a = query_qa_dense_index(
a , a , a , a , a , a )
else:
a , a = query_es_index(
a , a , index_name='''english_wiki40b_snippets_100w''' , n_results=a , )
a = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
a = '''question: {} context: {}'''.format(a , a )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda a : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda a : None),
} )
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1_024,
            device="cuda:0",
        )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
UpperCAmelCase__ = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
UpperCAmelCase__ = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase__ = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCAmelCase__ = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
UpperCAmelCase__ = st.sidebar.checkbox("Demo options")
if demo_options:
UpperCAmelCase__ = st.sidebar.selectbox(
"",
action_list,
index=3,
)
UpperCAmelCase__ = action_list.index(action_st)
UpperCAmelCase__ = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
UpperCAmelCase__ = show_type == "Show full text of passages"
else:
UpperCAmelCase__ = 3
UpperCAmelCase__ = True
UpperCAmelCase__ = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
UpperCAmelCase__ = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
UpperCAmelCase__ = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
UpperCAmelCase__ = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
UpperCAmelCase__ = "wiki40b"
UpperCAmelCase__ = "dense"
UpperCAmelCase__ = "beam"
UpperCAmelCase__ = 2
UpperCAmelCase__ = 64
UpperCAmelCase__ = 256
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = st.sidebar.checkbox("Generation options")
if generate_options:
UpperCAmelCase__ = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
UpperCAmelCase__ = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
UpperCAmelCase__ = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCAmelCase__ = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase__ = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase__ = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase__ = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase__ = None
# start main text
UpperCAmelCase__ = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
UpperCAmelCase__ = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCAmelCase__ = st.text_input("Enter your question here:", "")
else:
UpperCAmelCase__ = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method="dense", n_results=10)
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method="sparse", n_results=10)
UpperCAmelCase__ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase__ = support_list[:10]
UpperCAmelCase__ = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCAmelCase__ , UpperCAmelCase__ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
UpperCAmelCase__ = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
UpperCAmelCase__ = res[1].strip()
if sec_titles == "":
UpperCAmelCase__ = "[{}]({})".format(res[0], wiki_url)
else:
UpperCAmelCase__ = sec_titles.split(" & ")
UpperCAmelCase__ = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
UpperCAmelCase__ = find_nearest_training(question)
UpperCAmelCase__ = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
UpperCAmelCase__ = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
UpperCAmelCase__ = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
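# A minimal standalone sketch of the dense/sparse support merging performed
# above, assuming each retrieval result is an (article, section, passage)
# sequence; `merge_support` is an illustrative name, not part of the app:
def merge_support(dense_results, sparse_results, k=10):
    merged = []
    for res_d, res_s in zip(dense_results, sparse_results):
        # interleave the two rankings, keeping the first occurrence of each passage
        if tuple(res_d) not in merged:
            merged.append(tuple(res_d))
        if tuple(res_s) not in merged:
            merged.append(tuple(res_s))
    return merged[:k]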
| 26 | 1 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
UpperCAmelCase__ = logging.getLogger(__name__)
class lowercase_ :
'''simple docstring'''
def __init__( self : str ) ->Any:
"""simple docstring"""
a = False
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] ) ->List[str]:
"""simple docstring"""
if not self.initialized:
a = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , index=__UpperCAmelCase , init_retrieval=__UpperCAmelCase , )
a = True
def __lowerCAmelCase ( self : Any ) ->Tuple:
"""simple docstring"""
self.retriever.index.init_index()
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Tuple ) ->List[str]:
"""simple docstring"""
a , a = self.retriever._main_retrieve(__UpperCAmelCase , __UpperCAmelCase )
return doc_ids, retrieved_doc_embeds
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None ) ->Optional[Any]:
"""simple docstring"""
if index is not None and index.is_initialized() and len(__UpperCAmelCase ) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )
super().__init__(
__UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , index=__UpperCAmelCase , init_retrieval=__UpperCAmelCase , )
a = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
for worker in self.retrieval_workers
] )
def __lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
logger.info('''initializing retrieval''' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple ) ->Dict:
"""simple docstring"""
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
a = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
a , a = ray.get(random_worker.retrieve.remote(__UpperCAmelCase , __UpperCAmelCase ) )
else:
a , a = self._main_retrieve(__UpperCAmelCase , __UpperCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__UpperCAmelCase )
@classmethod
def __lowerCAmelCase ( cls : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple=None , **__UpperCAmelCase : Optional[int] ) ->List[Any]:
"""simple docstring"""
return super(__UpperCAmelCase , cls ).get_tokenizers(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
@classmethod
def __lowerCAmelCase ( cls : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : int ) ->str:
"""simple docstring"""
a = kwargs.pop('''config''' , __UpperCAmelCase ) or RagConfig.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
a = RagTokenizer.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase )
a = rag_tokenizer.question_encoder
a = rag_tokenizer.generator
if indexed_dataset is not None:
a = '''custom'''
a = CustomHFIndex(config.retrieval_vector_size , __UpperCAmelCase )
else:
a = cls._build_index(__UpperCAmelCase )
return cls(
__UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , retrieval_workers=__UpperCAmelCase , index=__UpperCAmelCase , )
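# Hedged usage sketch, shown as comments only: the two classes above are named
# RayRetriever and RagRayDistributedRetriever in the upstream RAG example, but
# were anonymized here, and the checkpoint name is an assumption.
# import ray
# ray.init()
# workers = [ray.remote(RayRetriever).remote() for _ in range(4)]
# retriever = RagRayDistributedRetriever.from_pretrained(
#     "facebook/rag-token-nq", retrieval_workers=workers
# )
# retriever.init_retrieval()  # each Ray actor loads and initializes its index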
| 26 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ = "▁"
UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = BertGenerationTokenizer
__snake_case = False
__snake_case = True
def __lowerCAmelCase ( self : str ) ->str:
"""simple docstring"""
super().setUp()
a = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
a = '''<s>'''
a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def __lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(__UpperCAmelCase ) , 1_002 )
def __lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def __lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
a = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
a = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
a = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
a = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def __lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
a = '''Hello World!'''
a = [18_536, 2_260, 101]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def __lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
a = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
a = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@require_torch
@slow
def __lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
a = list(self.big_tokenizer.get_vocab().keys() )[:10]
a = ''' '''.join(__UpperCAmelCase )
a = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors='''pt''' , return_token_type_ids=__UpperCAmelCase )
a = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=__UpperCAmelCase )
a = BertGenerationConfig()
a = BertGenerationEncoder(__UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCAmelCase )
model(**__UpperCAmelCase )
@slow
def __lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
a = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
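# A quick round-trip sketch of the assertions above, kept in comments because
# the fixture constant (SAMPLE_VOCAB upstream) was anonymized at the top of
# this file:
# tok = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
# ids = tok.convert_tokens_to_ids(tok.tokenize("This is a test"))
# assert ids == [285, 46, 10, 170, 382]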
| 26 | 1 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Tuple ) ->None:
"""simple docstring"""
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
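# Equivalent modern usage that avoids the deprecation warning raised above
# (the checkpoint name is an illustrative assumption):
# from transformers import MobileViTImageProcessor
# processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")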
| 26 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger("transformers.models.speecht5")
def _a ( a :Optional[Any] , a :Tuple , a :Dict ) -> List[str]:
hf_model.apply_weight_norm()
a = checkpoint['''input_conv.weight_g''']
a = checkpoint['''input_conv.weight_v''']
a = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
a = checkpoint[F"""upsamples.{i}.1.weight_g"""]
a = checkpoint[F"""upsamples.{i}.1.weight_v"""]
a = checkpoint[F"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
a = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
a = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
a = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
a = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
a = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
a = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]
a = checkpoint['''output_conv.1.weight_g''']
a = checkpoint['''output_conv.1.weight_v''']
a = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def _a ( a :List[str] , a :Union[str, Any] , a :Dict , a :Dict=None , a :List[Any]=None , ) -> int:
if config_path is not None:
a = SpeechTaHifiGanConfig.from_pretrained(a )
else:
a = SpeechTaHifiGanConfig()
a = SpeechTaHifiGan(a )
a = torch.load(a )
load_weights(orig_checkpoint['''model''']['''generator'''] , a , a )
a = np.load(a )
a = stats[0].reshape(-1 )
a = stats[1].reshape(-1 )
a = torch.from_numpy(a ).float()
a = torch.from_numpy(a ).float()
model.save_pretrained(a )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(a )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCAmelCase__ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
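# Example invocation, assuming this script is saved as
# convert_hifigan_checkpoint.py (all file paths below are placeholders):
# python convert_hifigan_checkpoint.py \
#     --checkpoint_path generator.ckpt \
#     --stats_path stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan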
| 26 | 1 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __UpperCAmelCase : Union[str, "sqlalchemy.sql.Selectable"] , __UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __UpperCAmelCase : Optional[Features] = None , __UpperCAmelCase : str = None , __UpperCAmelCase : bool = False , **__UpperCAmelCase : str , ) ->Any:
"""simple docstring"""
super().__init__(features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , **__UpperCAmelCase )
a = Sql(
cache_dir=__UpperCAmelCase , features=__UpperCAmelCase , sql=__UpperCAmelCase , con=__UpperCAmelCase , **__UpperCAmelCase , )
def __lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
a = None
a = None
a = None
a = None
self.builder.download_and_prepare(
download_config=__UpperCAmelCase , download_mode=__UpperCAmelCase , verification_mode=__UpperCAmelCase , base_path=__UpperCAmelCase , )
# Build dataset for splits
a = self.builder.as_dataset(
split='''train''' , verification_mode=__UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class lowercase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , __UpperCAmelCase : Dataset , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None , **__UpperCAmelCase : Any , ) ->str:
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
a = dataset
a = name
a = con
a = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
a = num_proc
a = to_sql_kwargs
def __lowerCAmelCase ( self : Optional[Any] ) ->int:
"""simple docstring"""
a = self.to_sql_kwargs.pop('''sql''' , __UpperCAmelCase )
a = self.to_sql_kwargs.pop('''con''' , __UpperCAmelCase )
a = self.to_sql_kwargs.pop('''index''' , __UpperCAmelCase )
a = self._write(index=__UpperCAmelCase , **self.to_sql_kwargs )
return written
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Tuple ) ->List[Any]:
"""simple docstring"""
a , a , a = args
a = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
a = query_table(
table=self.dataset.data , key=slice(__UpperCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
a = batch.to_pandas()
a = df.to_sql(self.name , self.con , index=__UpperCAmelCase , **__UpperCAmelCase )
return num_rows or len(__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : List[Any] , **__UpperCAmelCase : List[str] ) ->int:
"""simple docstring"""
a = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
a , a = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , __UpperCAmelCase , __UpperCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
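# Hedged usage sketch, kept in comments: upstream these classes are
# SqlDatasetReader and SqlDatasetWriter in datasets.io.sql, but both were
# anonymized above.
# import sqlite3
# con = sqlite3.connect(":memory:")
# con.execute("CREATE TABLE items (x INTEGER)")
# con.executemany("INSERT INTO items VALUES (?)", [(1,), (2,)])
# ds = SqlDatasetReader("SELECT x FROM items", con).read()
# SqlDatasetWriter(ds, "items_copy", con).write()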
| 26 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase__ = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
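# The _LazyModule indirection keeps this package cheap to import: the heavy,
# torch-dependent modeling submodule is only loaded on first attribute access,
# e.g.:
# from transformers.models import gpt_bigcode
# cls = gpt_bigcode.GPTBigCodeForCausalLM  # modeling module imports here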
| 26 | 1 |
from __future__ import annotations
def _a ( a :dict , a :str ) -> set[str]:
a , a = set(a ), [start]
while stack:
a = stack.pop()
explored.add(a )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(a )
return explored
UpperCAmelCase__ = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
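    # Sanity check mirroring the call above (a set is unordered, so only
    # membership is asserted); every vertex is reachable from "A":
    assert depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}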
| 26 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def _a ( a :Tuple ) -> int:
a = tmp_path / '''file.csv'''
a = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(a , '''w''' ) as f:
f.write(a )
return str(a )
@pytest.fixture
def _a ( a :int ) -> List[str]:
a = tmp_path / '''malformed_file.csv'''
a = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(a , '''w''' ) as f:
f.write(a )
return str(a )
@pytest.fixture
def _a ( a :Dict , a :int ) -> List[str]:
a = tmp_path / '''csv_with_image.csv'''
a = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(a , '''w''' ) as f:
f.write(a )
return str(a )
@pytest.fixture
def _a ( a :List[Any] ) -> Dict:
a = tmp_path / '''csv_with_label.csv'''
a = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(a , '''w''' ) as f:
f.write(a )
return str(a )
@pytest.fixture
def _a ( a :Tuple ) -> Any:
a = tmp_path / '''csv_with_int_list.csv'''
a = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(a , '''w''' ) as f:
f.write(a )
return str(a )
def _a ( a :Dict , a :int , a :Union[str, Any] ) -> List[Any]:
a = Csv()
a = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(a , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(a ) in record.message
for record in caplog.records )
@require_pil
def _a ( a :Dict ) -> Any:
with open(a , encoding='''utf-8''' ) as f:
a = f.read().splitlines()[1]
a = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
a = csv._generate_tables([[csv_file_with_image]] )
a = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
a = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def _a ( a :Any ) -> Tuple:
with open(a , encoding='''utf-8''' ) as f:
a = f.read().splitlines()[1:]
a = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
a = csv._generate_tables([[csv_file_with_label]] )
a = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
a = pa_table.to_pydict()['''label''']
assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label ) for label in labels]
def _a ( a :Union[str, Any] ) -> Optional[Any]:
a = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x : [int(i ) for i in x.split()]} )
a = csv._generate_tables([[csv_file_with_int_list]] )
a = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
a = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
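# The converters test above ultimately exercises pandas' read_csv hook; a
# self-contained illustration without the datasets machinery, kept in comments
# so it does not run at module import:
# import io
# import pandas as pd
# df = pd.read_csv(
#     io.StringIO("int_list\n1 2 3\n4 5 6\n"),
#     converters={"int_list": lambda x: [int(i) for i in x.split()]},
# )
# assert df["int_list"].tolist() == [[1, 2, 3], [4, 5, 6]]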
| 26 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 16_00, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 16_00, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=__UpperCAmelCase , )
assert hasattr(self , '''env''' )
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : Dict ) ->Dict:
"""simple docstring"""
a = {
'''enabled''': True,
'''processes_per_host''': 8,
}
a = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
a = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
a = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=__UpperCAmelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCAmelCase , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 500,
} , metric_definitions=self.env.metric_definitions , distribution=__UpperCAmelCase , py_version='''py36''' , )
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Any ) ->List[str]:
"""simple docstring"""
TrainingJobAnalytics(__UpperCAmelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Tuple ) ->str:
"""simple docstring"""
a = self.create_estimator(__UpperCAmelCase )
# run training
estimator.fit()
# result dataframe
a = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
a = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , __UpperCAmelCase )
| 26 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = torch.device("cpu")
def _a ( ) -> Union[str, Any]:
a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a = Image.open(requests.get(a , stream=a ).raw )
return im
def _a ( a :Dict ) -> Tuple:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def _a ( a :int , a :Any , a :Union[str, Any] ) -> int:
a = dct.pop(a )
a = val
def _a ( a :Any ) -> Dict:
a = []
for k in state_dict.keys():
a = k
if ".pwconv" in k:
a = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
if ".dwconv" in k:
a = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
if ".Proj." in k:
a = k_new.replace('''.Proj.''' , '''.proj.''' )
if "patch_embed" in k_new:
a = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
if "network" in k_new:
a = k_new.split('''.''' )
if ls[2].isdigit():
a = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
else:
a = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def _a ( a :List[Any] , a :Tuple , a :List[str] ) -> Union[str, Any]:
a = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
a = 1_000
a = '''huggingface/label-files'''
a = '''imagenet-1k-id2label.json'''
a = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) )
a = {int(a ): v for k, v in idalabel.items()}
a = idalabel
a = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a = [3, 3, 6, 4]
a = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
a = [3, 3, 9, 6]
a = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
a = [4, 3, 10, 5]
a = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
a = [4, 4, 12, 6]
a = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('''https''' ):
a = torch.hub.load_state_dict_from_url(a , map_location='''cpu''' , check_hash=a )
else:
a = torch.load(a , map_location='''cpu''' )
a = checkpoint
a = create_rename_keys(a )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(a , a , a )
# load HuggingFace model
a = SwiftFormerForImageClassification(a ).eval()
hf_model.load_state_dict(a )
# prepare test inputs
a = prepare_img()
a = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
a = processor(images=a , return_tensors='''pt''' )
# compare outputs from both models
a = get_expected_output(a )
a = hf_model(inputs['''pixel_values'''] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , a , atol=1e-3 )
Path(a ).mkdir(exist_ok=a )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
UpperCAmelCase__ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
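# Example invocation, assuming this script is saved as
# convert_swiftformer_original_to_hf.py (the checkpoint path is a placeholder):
# python convert_swiftformer_original_to_hf.py \
#     --swiftformer_name swiftformer_xs \
#     --pytorch_dump_folder_path ./converted_outputs/ \
#     --original_ckpt /path/to/swiftformer_xs.pth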
| 26 | 1 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
UpperCAmelCase__ = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def _a ( a :str ) -> int:
a = ['''layers''', '''blocks''']
for k in ignore_keys:
state_dict.pop(a , a )
UpperCAmelCase__ = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def _a ( a :List[Any] ) -> List[str]:
a = list(s_dict.keys() )
for key in keys:
a = key
for k, v in WHISPER_MAPPING.items():
if k in key:
a = new_key.replace(a , a )
print(F"""{key} -> {new_key}""" )
a = s_dict.pop(a )
return s_dict
def _a ( a :Optional[Any] ) -> List[str]:
a , a = emb.weight.shape
a = nn.Linear(a , a , bias=a )
a = emb.weight.data
return lin_layer
def _a ( a :str , a :str ) -> bytes:
os.makedirs(a , exist_ok=a )
a = os.path.basename(a )
a = url.split('''/''' )[-2]
a = os.path.join(a , a )
if os.path.exists(a ) and not os.path.isfile(a ):
raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
if os.path.isfile(a ):
a = open(a , '''rb''' ).read()
if hashlib.sha256(a ).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(a ) as source, open(a , '''wb''' ) as output:
with tqdm(
total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=a , unit_divisor=1_024 ) as loop:
while True:
a = source.read(8_192 )
if not buffer:
break
output.write(a )
loop.update(len(a ) )
a = open(a , '''rb''' ).read()
if hashlib.sha256(a ).hexdigest() != expected_sha256:
raise RuntimeError(
'''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''' )
return model_bytes
def _a ( a :List[str] , a :Optional[int] ) -> List[Any]:
if ".pt" not in checkpoint_path:
a = _download(_MODELS[checkpoint_path] )
else:
a = torch.load(a , map_location='''cpu''' )
a = original_checkpoint['''dims''']
a = original_checkpoint['''model_state_dict''']
a = state_dict['''decoder.token_embedding.weight''']
remove_ignore_keys_(a )
rename_keys(a )
a = True
a = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
a = WhisperConfig(
vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=a , decoder_ffn_dim=a , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_head'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
a = WhisperForConditionalGeneration(a )
a , a = model.model.load_state_dict(a , strict=a )
if len(a ) > 0 and not set(a ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
F""" but all the following weights are missing {missing}""" )
if tie_embeds:
a = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
a = proj_out_weights
model.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
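# Example invocation, assuming this script is saved as convert_openai_to_hf.py:
# --checkpoint_path may be a key of _MODELS above (the weights are then
# downloaded and SHA256-verified) or a local .pt file:
# python convert_openai_to_hf.py --checkpoint_path tiny.en \
#     --pytorch_dump_folder_path ./whisper-tiny.en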
| 26 |
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __UpperCAmelCase : UNet1DModel , __UpperCAmelCase : UNet1DModel , __UpperCAmelCase : DDPMScheduler , __UpperCAmelCase : Optional[int] , ) ->List[str]:
"""simple docstring"""
super().__init__()
a = value_function
a = unet
a = scheduler
a = env
a = env.get_dataset()
a = {}
for key in self.data.keys():
try:
a = self.data[key].mean()
except: # noqa: E722
pass
a = {}
for key in self.data.keys():
try:
a = self.data[key].std()
except: # noqa: E722
pass
a = env.observation_space.shape[0]
a = env.action_space.shape[0]
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ) ->Dict:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) ->List[str]:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def __lowerCAmelCase ( self : int , __UpperCAmelCase : int ) ->List[str]:
"""simple docstring"""
if type(__UpperCAmelCase ) is dict:
return {k: self.to_torch(__UpperCAmelCase ) for k, v in x_in.items()}
elif torch.is_tensor(__UpperCAmelCase ):
return x_in.to(self.unet.device )
return torch.tensor(__UpperCAmelCase , device=self.unet.device )
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple ) ->int:
"""simple docstring"""
for key, val in cond.items():
a = val.clone()
return x_in
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = x.shape[0]
a = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
a = torch.full((batch_size,) , __UpperCAmelCase , device=self.unet.device , dtype=torch.long )
for _ in range(__UpperCAmelCase ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
a = self.value_function(x.permute(0 , 2 , 1 ) , __UpperCAmelCase ).sample
a = torch.autograd.grad([y.sum()] , [x] )[0]
a = self.scheduler._get_variance(__UpperCAmelCase )
a = torch.exp(0.5 * posterior_variance )
a = model_std * grad
a = 0
a = x.detach()
a = x + scale * grad
a = self.reset_x0(__UpperCAmelCase , __UpperCAmelCase , self.action_dim )
a = self.unet(x.permute(0 , 2 , 1 ) , __UpperCAmelCase ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
a = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , predict_epsilon=__UpperCAmelCase )['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
a = self.reset_x0(__UpperCAmelCase , __UpperCAmelCase , self.action_dim )
a = self.to_torch(__UpperCAmelCase )
return x, y
def __call__( self : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int]=64 , __UpperCAmelCase : int=32 , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : str=0.1 ) ->List[str]:
"""simple docstring"""
a = self.normalize(__UpperCAmelCase , '''observations''' )
a = obs[None].repeat(__UpperCAmelCase , axis=0 )
a = {0: self.to_torch(__UpperCAmelCase )}
a = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
a = randn_tensor(__UpperCAmelCase , device=self.unet.device )
a = self.reset_x0(__UpperCAmelCase , __UpperCAmelCase , self.action_dim )
a = self.to_torch(__UpperCAmelCase )
# run the diffusion process
a , a = self.run_diffusion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# sort output trajectories by value
a = y.argsort(0 , descending=__UpperCAmelCase ).squeeze()
a = x[sorted_idx]
a = sorted_values[:, :, : self.action_dim]
a = actions.detach().cpu().numpy()
a = self.de_normalize(__UpperCAmelCase , key='''actions''' )
# select the action with the highest value
if y is not None:
a = 0
else:
# if we didn't run value guiding, select a random action
a = np.random.randint(0 , __UpperCAmelCase )
a = denorm_actions[selected_index, 0]
return denorm_actions
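# normalize/de_normalize above rely on per-key dataset statistics collected in
# __init__; a standalone sketch of that pattern, kept in comments (the key name
# and dimensionality are illustrative assumptions):
# import numpy as np
# means, stds = {"observations": np.zeros(11)}, {"observations": np.ones(11)}
# def normalize(x, key):
#     return (x - means[key]) / stds[key]
# def de_normalize(x, key):
#     return x * stds[key] + means[key]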
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase__ = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model"}
UpperCAmelCase__ = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : List[str]="<s>" , __UpperCAmelCase : int="</s>" , __UpperCAmelCase : Any="<unk>" , __UpperCAmelCase : Optional[Any]="<sep>" , __UpperCAmelCase : int="<pad>" , __UpperCAmelCase : Any="<cls>" , __UpperCAmelCase : List[str]="<mask>" , __UpperCAmelCase : Optional[int]=["<eop>", "<eod>"] , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : Union[str, Any] , ) ->None:
"""simple docstring"""
a = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
a = 3
a = do_lower_case
a = remove_space
a = keep_accents
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
a = jieba
a = str.maketrans(''' \n''' , '''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
return len(self.sp_model )
def __lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
a = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : List[str] , __UpperCAmelCase : Optional[int] ) ->str:
"""simple docstring"""
a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[str] ) ->List[str]:
"""simple docstring"""
if self.remove_space:
a = ''' '''.join(inputs.strip().split() )
else:
a = inputs
a = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
a = unicodedata.normalize('''NFKD''' , __UpperCAmelCase )
a = ''''''.join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
a = outputs.lower()
return outputs
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
a = self.preprocess_text(__UpperCAmelCase )
a = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
a = []
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
a = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
a = cur_pieces[1:]
else:
a = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Any ) ->Any:
"""simple docstring"""
return self.sp_model.PieceToId(__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Dict ) ->Union[str, Any]:
"""simple docstring"""
return self.sp_model.IdToPiece(__UpperCAmelCase )
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
a = ''''''.join(__UpperCAmelCase ).replace(__UpperCAmelCase , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
a = [self.sep_token_id]
a = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
def __lowerCAmelCase ( self : Any , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = super()._decode(*__UpperCAmelCase , **__UpperCAmelCase )
a = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
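# The character translator built in __init__ maps " " -> U+2582 and
# "\n" -> U+2583 before SentencePiece segmentation; the _decode override above
# reverses the mapping. A standalone round-trip sketch:
# tr = str.maketrans(" \n", "\u2582\u2583")
# encoded = "你好 世界\n".translate(tr)
# assert encoded.replace("\u2582", " ").replace("\u2583", "\n") == "你好 世界\n"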
| 26 | 1 |
def _a ( a :bytes ) -> str:
return "".join([hex(a )[2:].zfill(2 ).upper() for byte in list(a )] )
def _a ( a :str ) -> bytes:
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(a ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(a ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(a ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
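    # Round-trip sanity check, assuming the upstream names base16_encode /
    # base16_decode (both functions were anonymized to `_a` above):
    # assert base16_encode(b"Hello") == "48656C6C6F"
    # assert base16_decode("48656C6C6F") == b"Hello"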
| 26 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint( checkpoint , config ):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks )
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks )
    }
    for i in range(num_down_blocks ):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight" )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias" )
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    for i in range(num_up_blocks ):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    return new_checkpoint
def vae_pt_to_vae_diffuser( checkpoint_path :str , output_path :str , ) -> None:
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors" ):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path , framework="pt" , device="cpu" ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path , map_location=device )["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config , image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to store the converted VAE.")
    args = parser.parse_args()
    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 26 | 1 |
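# A hedged example invocation for the conversion script above; the script
# filename and both paths are placeholders, not values from the original:
#
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers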
from __future__ import annotations
def median_of_two_arrays( nums_a :list[float] , nums_b :list[float] ) -> float:
    all_numbers = sorted(nums_a + nums_b )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_b)}""")
| 26 |
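# Quick sanity checks for median_of_two_arrays above:
assert median_of_two_arrays([1.0, 3.0], [2.0]) == 2.0  # odd total -> middle element
assert median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) == 2.5  # even total -> mean of the middle two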
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 26 | 1 |
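# A hedged usage sketch for the processor above, mirroring the upstream
# transformers CLIPProcessor API. The checkpoint name is an assumption for
# illustration and loading it needs network access, so the lines stay
# commented out.
#
#   from transformers import CLIPProcessor
#   from PIL import Image
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
#   # `inputs` now bundles input_ids, attention_mask and pixel_values.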
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_table_transformer": [
"TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TableTransformerConfig",
"TableTransformerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TableTransformerForObjectDetection",
"TableTransformerModel",
"TableTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 26 | 1 |
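# A small standalone check of the BERT-style layouts built above:
# [CLS] A [SEP] is all segment 0; in [CLS] A [SEP] B [SEP], B and its
# trailing [SEP] are segment 1. Token strings here are illustrative.
CLS, SEP = ["[CLS]"], ["[SEP]"]


def token_type_ids_sketch(ids_a, ids_b=None):
    if ids_b is None:
        return [0] * len(CLS + ids_a + SEP)
    return [0] * len(CLS + ids_a + SEP) + [1] * len(ids_b + SEP)


assert token_type_ids_sketch(["hi"]) == [0, 0, 0]
assert token_type_ids_sketch(["hi"], ["there"]) == [0, 0, 0, 1, 1]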
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler ( SchedulerMixin , ConfigMixin ):
    '''simple docstring'''
    order = 1
    @register_to_config
    def __init__( self , num_train_timesteps=2_000 , beta_min=0.1 , beta_max=20 , sampling_eps=1e-3 ):
        """simple docstring"""
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps( self , num_inference_steps , device: Union[str, torch.device] = None ):
        """simple docstring"""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred( self , score , x , t , generator=None ):
        """simple docstring"""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
    def __len__( self ):
        """simple docstring"""
        return self.config.num_train_timesteps
| 26 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple( max_perimeter :int ) -> typing.Counter[int]:
    triplets = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution( n :int = 1_000 ) -> int:
    triplets = pythagorean_triple(n )
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 26 | 1 |
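# Worked checks for the Project Euler 39 routine above: perimeter 120 is the
# classic example with exactly three integer right triangles, (20, 48, 52),
# (24, 45, 51) and (30, 40, 50), and for perimeters up to 1000 the maximum
# is reached at 840.
assert pythagorean_triple(120)[120] == 3
assert solution(1_000) == 840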
UpperCAmelCase__ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
UpperCAmelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
UpperCAmelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 26 |
from __future__ import annotations
def depth_first_search( graph :dict , start :str ) -> set[str]:
    explored , stack = set(start ), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 26 | 1 |
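# The traversal above reaches every vertex of G from "A", so the final print
# shows the full vertex set (set ordering is arbitrary):
assert depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}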
def present_value( discount_rate :float , cash_flows :list[float] ) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative" )
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty" )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
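# Worked example for present_value above: a 1000 outlay followed by three
# inflows of 500 at a 10% discount rate.
# -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3 = 243.43 (rounded to 2 digits).
assert present_value(0.1, [-1000.0, 500.0, 500.0, 500.0]) == 243.43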
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash( tokens :List[str] ) -> Optional[MinHash]:
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens( code :str ) -> Set[str]:
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex :
    '''simple docstring'''
    def __init__( self , *, duplication_jaccard_threshold : float = 0.85 , ):
        """simple docstring"""
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self , code_key : Tuple , min_hash : MinHash ) -> None:
        """simple docstring"""
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F"""Duplicate key {code_key}""" )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ) -> List[List[Dict]]:
        """simple docstring"""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self , filepath ) -> None:
        """simple docstring"""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , "w" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash( element ):
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter( dataset_iterator :Type[Dataset] ):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10_000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters( dataset_iterator :Type[Dataset] , jaccard_threshold :float ):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity( code_a :str , code_b :str ) -> float:
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
_shared_dataset = None
def _find_cluster_extremes_shared( cluster , jaccard_threshold ):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes( cluster_list , dataset , jaccard_threshold ):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset( dataset :Type[Dataset] , jaccard_threshold :float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(F"""Original dataset size: {len(dataset )}""" )
    print(F"""Number of duplicate clusters: {len(duplicate_clusters )}""" )
    print(F"""Files in duplicate cluster: {len(duplicate_indices )}""" )
    print(F"""Unique files in duplicate cluster: {len(extreme_dict )}""" )
    print(F"""Filtered dataset size: {len(ds_filter )}""" )
    return ds_filter, duplicate_clusters
| 26 | 1 |
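# A minimal sketch of the datasketch primitives the deduplication pipeline
# above is built on: hash a token set into a MinHash signature, index the
# signatures in an LSH structure, then query for near-duplicates above a
# Jaccard threshold. The document keys are illustrative.
from datasketch import MinHash, MinHashLSH


def make_minhash(tokens):
    m = MinHash(num_perm=256)
    for token in tokens:
        m.update(token.encode())
    return m


lsh = MinHashLSH(threshold=0.85, num_perm=256)
lsh.insert("doc_a", make_minhash({"def", "foo", "return", "1"}))
# An identical token set comes back as a candidate duplicate: ['doc_a']
print(lsh.query(make_minhash({"def", "foo", "return", "1"})))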
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = 42
class lowercase_ ( lowercase , lowercase ):
'''simple docstring'''
__snake_case = True
@register_to_config
def __init__( self : Any , __UpperCAmelCase : int = 3 , __UpperCAmelCase : int = 3 , __UpperCAmelCase : Tuple[str] = ("DownEncoderBlock2D",) , __UpperCAmelCase : Tuple[str] = ("UpDecoderBlock2D",) , __UpperCAmelCase : Tuple[int] = (64,) , __UpperCAmelCase : int = 1 , __UpperCAmelCase : str = "silu" , __UpperCAmelCase : int = 4 , __UpperCAmelCase : int = 32 , __UpperCAmelCase : int = 32 , __UpperCAmelCase : float = 0.18215 , ) ->Any:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
a = Encoder(
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , down_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , double_z=__UpperCAmelCase , )
# pass init params to Decoder
a = Decoder(
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , up_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , act_fn=__UpperCAmelCase , )
        a = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        a = nn.Conv2d(latent_channels , latent_channels , 1 )
a = False
a = False
# only relevant if vae tiling is enabled
a = self.config.sample_size
a = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
a = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
a = 0.25
def __lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any]=False ) ->Union[str, Any]:
"""simple docstring"""
if isinstance(__UpperCAmelCase , (Encoder, Decoder) ):
a = value
def __lowerCAmelCase ( self : int , __UpperCAmelCase : bool = True ) ->Union[str, Any]:
"""simple docstring"""
a = use_tiling
def __lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
self.enable_tiling(__UpperCAmelCase )
def __lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
a = True
def __lowerCAmelCase ( self : Union[str, Any] ) ->List[str]:
"""simple docstring"""
a = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCAmelCase ( self : Union[str, Any] ) ->Dict[str, AttentionProcessor]:
"""simple docstring"""
a = {}
def fn_recursive_add_processors(__UpperCAmelCase : str , __UpperCAmelCase : torch.nn.Module , __UpperCAmelCase : Dict[str, AttentionProcessor] ):
if hasattr(__UpperCAmelCase , '''set_processor''' ):
a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return processors
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) ->Any:
"""simple docstring"""
a = len(self.attn_processors.keys() )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(__UpperCAmelCase : str , __UpperCAmelCase : torch.nn.Module , __UpperCAmelCase : Dict ):
if hasattr(__UpperCAmelCase , '''set_processor''' ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
module.set_processor(__UpperCAmelCase )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : bool = True ) ->AutoencoderKLOutput:
"""simple docstring"""
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(__UpperCAmelCase , return_dict=__UpperCAmelCase )
if self.use_slicing and x.shape[0] > 1:
a = [self.encoder(__UpperCAmelCase ) for x_slice in x.split(1 )]
a = torch.cat(__UpperCAmelCase )
else:
a = self.encoder(__UpperCAmelCase )
a = self.quant_conv(__UpperCAmelCase )
a = DiagonalGaussianDistribution(__UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__UpperCAmelCase )
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : bool = True ) ->Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(__UpperCAmelCase , return_dict=__UpperCAmelCase )
a = self.post_quant_conv(__UpperCAmelCase )
a = self.decoder(__UpperCAmelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
@apply_forward_hook
def __lowerCAmelCase ( self : int , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : bool = True ) ->Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_slicing and z.shape[0] > 1:
a = [self._decode(__UpperCAmelCase ).sample for z_slice in z.split(1 )]
a = torch.cat(__UpperCAmelCase )
else:
a = self._decode(__UpperCAmelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=__UpperCAmelCase )
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Any ) ->Optional[int]:
"""simple docstring"""
a = min(a.shape[2] , b.shape[2] , __UpperCAmelCase )
for y in range(__UpperCAmelCase ):
a = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict ) ->List[Any]:
"""simple docstring"""
a = min(a.shape[3] , b.shape[3] , __UpperCAmelCase )
for x in range(__UpperCAmelCase ):
a = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : bool = True ) ->AutoencoderKLOutput:
"""simple docstring"""
a = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
a = int(self.tile_latent_min_size * self.tile_overlap_factor )
a = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
a = []
for i in range(0 , x.shape[2] , __UpperCAmelCase ):
a = []
for j in range(0 , x.shape[3] , __UpperCAmelCase ):
a = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
a = self.encoder(__UpperCAmelCase )
a = self.quant_conv(__UpperCAmelCase )
row.append(__UpperCAmelCase )
rows.append(__UpperCAmelCase )
a = []
for i, row in enumerate(__UpperCAmelCase ):
a = []
for j, tile in enumerate(__UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
a = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase )
if j > 0:
a = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) )
a = torch.cat(__UpperCAmelCase , dim=2 )
a = DiagonalGaussianDistribution(__UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__UpperCAmelCase )
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : bool = True ) ->Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
a = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
a = int(self.tile_sample_min_size * self.tile_overlap_factor )
a = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
a = []
for i in range(0 , z.shape[2] , __UpperCAmelCase ):
a = []
for j in range(0 , z.shape[3] , __UpperCAmelCase ):
a = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
a = self.post_quant_conv(__UpperCAmelCase )
a = self.decoder(__UpperCAmelCase )
row.append(__UpperCAmelCase )
rows.append(__UpperCAmelCase )
a = []
for i, row in enumerate(__UpperCAmelCase ):
a = []
for j, tile in enumerate(__UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
a = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase )
if j > 0:
a = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) )
a = torch.cat(__UpperCAmelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[torch.Generator] = None , ) ->Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
a = sample
a = self.encode(__UpperCAmelCase ).latent_dist
if sample_posterior:
a = posterior.sample(generator=__UpperCAmelCase )
else:
a = posterior.mode()
a = self.decode(__UpperCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
| 26 |
from math import ceil, sqrt
def solution( limit :int = 1_000_000 ) -> int:
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
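# Sanity checks for the laminae counter above: the smallest lamina is a 3x3
# square with a 1x1 hole (8 tiles), and the Project Euler 173 statement
# notes that up to one hundred tiles admit forty-one distinct laminae.
assert solution(8) == 1
assert solution(100) == 41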
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = '''poolformer'''
def __init__( self : Any , __UpperCAmelCase : Tuple=3 , __UpperCAmelCase : Optional[int]=16 , __UpperCAmelCase : Optional[int]=16 , __UpperCAmelCase : Tuple=3 , __UpperCAmelCase : Tuple=4.0 , __UpperCAmelCase : str=[2, 2, 6, 2] , __UpperCAmelCase : Optional[int]=[64, 128, 320, 512] , __UpperCAmelCase : Union[str, Any]=[7, 3, 3, 3] , __UpperCAmelCase : Optional[int]=[4, 2, 2, 2] , __UpperCAmelCase : int=[2, 1, 1, 1] , __UpperCAmelCase : Optional[Any]=4 , __UpperCAmelCase : List[str]=0.0 , __UpperCAmelCase : List[str]="gelu" , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Dict=1e-5 , __UpperCAmelCase : Tuple=0.02 , **__UpperCAmelCase : Any , ) ->Dict:
"""simple docstring"""
a = num_channels
a = patch_size
a = stride
a = padding
a = pool_size
a = hidden_sizes
a = mlp_ratio
a = depths
a = patch_sizes
a = strides
a = num_encoder_blocks
a = drop_path_rate
a = hidden_act
a = use_layer_scale
a = layer_scale_init_value
a = initializer_range
super().__init__(**__UpperCAmelCase )
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = version.parse('''1.11''' )
@property
def __lowerCAmelCase ( self : str ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCAmelCase ( self : List[str] ) ->float:
"""simple docstring"""
return 2e-3
| 26 |
UpperCAmelCase__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 26 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int]=13 , __UpperCAmelCase : int=3 , __UpperCAmelCase : int=224 , __UpperCAmelCase : Optional[Any]=30 , __UpperCAmelCase : Dict=400 , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : str=None , __UpperCAmelCase : Any=True , __UpperCAmelCase : List[str]=[0.5, 0.5, 0.5] , __UpperCAmelCase : Tuple=[0.5, 0.5, 0.5] , ) ->Dict:
"""simple docstring"""
a = size if size is not None else {'''height''': 18, '''width''': 18}
a = parent
a = batch_size
a = num_channels
a = image_size
a = min_resolution
a = max_resolution
a = do_resize
a = size
a = do_normalize
a = image_mean
a = image_std
def __lowerCAmelCase ( self : Dict ) ->List[Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = ViTImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : int ) ->Optional[Any]:
"""simple docstring"""
a = EfficientFormerImageProcessorTester(self )
@property
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
return self.image_proc_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''size''' ) )
def __lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Optional[Any] ) ->int:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_proc_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
a = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
a = image_processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def __lowerCAmelCase ( self : Tuple ) ->List[Any]:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_proc_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
a = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
a = image_processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def __lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_proc_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
a = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
a = image_processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
| 26 |
def gnome_sort( lst :list ) -> list:
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1] , lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 26 | 1 |
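# Quick checks for the repaired gnome sort above; it sorts in place and also
# returns the list.
assert gnome_sort([3, 1, 2]) == [1, 2, 3]
assert gnome_sort([]) == []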
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
UpperCAmelCase__ = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
UpperCAmelCase__ = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def _a ( ) -> Optional[Any]:
a = calculate_rouge(a , a , bootstrap_aggregation=a , rouge_keys=['''rouge2''', '''rougeL'''] )
assert isinstance(a , a )
a = calculate_rouge(a , a , bootstrap_aggregation=a , rouge_keys=['''rouge2'''] )
assert (
pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
)
def _a ( ) -> int:
a = '''rougeLsum'''
a = calculate_rouge(a , a , newline_sep=a , rouge_keys=[k] )[k]
a = calculate_rouge(a , a , newline_sep=a , rouge_keys=[k] )[k]
assert score > score_no_sep
def _a ( ) -> int:
a = ['''rouge1''', '''rouge2''', '''rougeL''']
a = calculate_rouge(a , a , newline_sep=a , rouge_keys=a )
a = calculate_rouge(a , a , newline_sep=a , rouge_keys=a )
assert score_sep == score_no_sep
def _a ( ) -> Union[str, Any]:
a = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
a = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(a , a , newline_sep=a ) == calculate_rouge(a , a , newline_sep=a )
def _a ( ) -> str:
a = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
a = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
a = calculate_rouge(a , a , rouge_keys=['''rougeLsum'''] , newline_sep=a )['''rougeLsum''']
a = calculate_rouge(a , a , rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
assert new_score > prev_score
def _a ( ) -> int:
a = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
a = calculate_rouge_path(data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) )
assert isinstance(a , a )
a = calculate_rouge_path(
data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) , bootstrap_aggregation=a )
assert isinstance(a , a )
| 26 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 | 1 |
def solution( n :int = 600_851_475_143 ) -> int:
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 |
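# Worked checks for the largest-prime-factor routine above: the Project
# Euler 3 statement's example 13195 = 5 * 7 * 13 * 29, and the default
# input 600851475143, whose largest prime factor is 6857.
assert solution(13_195) == 29
assert solution() == 6_857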
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name( class_name :str ):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(F""".{module_name}""" , "transformers.models" )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor , "__name__" , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers" )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_feature_extractor_config( pretrained_model_name_or_path :Union[str, os.PathLike] , cache_dir :Optional[Union[str, os.PathLike]] = None , force_download :bool = False , resume_download :bool = False , proxies :Optional[Dict[str, str]] = None , use_auth_token :Optional[Union[bool, str]] = None , revision :Optional[str] = None , local_files_only :bool = False , **kwargs , ):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead." )
        return {}
    with open(resolved_config_file , encoding="utf-8" ) as reader:
        return json.load(reader )
class lowercase_ :
'''simple docstring'''
def __init__( self : Tuple ) ->int:
"""simple docstring"""
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> Any:
        """simple docstring"""
        config = kwargs.pop('''config''', None)
        trust_remote_code = kwargs.pop('''trust_remote_code''', None)
        kwargs['''_from_auto'''] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get('''feature_extractor_type''', None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('''auto_map''', {}):
            feature_extractor_auto_map = config_dict['''auto_map''']['''AutoFeatureExtractor''']

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, '''feature_extractor_type''', None)
            if hasattr(config, '''auto_map''') and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['''AutoFeatureExtractor''']

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('''code_revision''', None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
            F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
            F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}""")
    @staticmethod
    def register(config_class, feature_extractor_class) -> None:
        """simple docstring"""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
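# Illustrative usage (editorial): the checkpoint's config decides which concrete class
# `from_pretrained` returns, e.g.
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")
#   # -> a Wav2Vec2FeatureExtractor instance, per the mapping above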
| 26 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = ShapEImgaImgPipeline
__snake_case = ['''image''']
__snake_case = ['''image''']
__snake_case = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
__snake_case = False
@property
def __lowerCAmelCase ( self : Optional[int] ) ->Dict:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : List[Any] ) ->int:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : int ) ->Any:
"""simple docstring"""
return 8
@property
def __lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
torch.manual_seed(0 )
a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
a = CLIPVisionModel(__UpperCAmelCase )
return model
@property
def __lowerCAmelCase ( self : List[str] ) ->Tuple:
"""simple docstring"""
a = CLIPImageProcessor(
crop_size=224 , do_center_crop=__UpperCAmelCase , do_normalize=__UpperCAmelCase , do_resize=__UpperCAmelCase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def __lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
a = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
a = PriorTransformer(**__UpperCAmelCase )
return model
@property
def __lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
torch.manual_seed(0 )
a = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
a = ShapERenderer(**__UpperCAmelCase )
return model
def __lowerCAmelCase ( self : int ) ->Optional[Any]:
"""simple docstring"""
a = self.dummy_prior
a = self.dummy_image_encoder
a = self.dummy_image_processor
a = self.dummy_renderer
a = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=__UpperCAmelCase , clip_sample=__UpperCAmelCase , clip_sample_range=1.0 , )
a = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __lowerCAmelCase ( self : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict=0 ) ->Union[str, Any]:
"""simple docstring"""
a = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
if str(__UpperCAmelCase ).startswith('''mps''' ):
a = torch.manual_seed(__UpperCAmelCase )
else:
a = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
a = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
a = '''cpu'''
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
a = output.images[0]
a = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
a = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self : Any ) ->Tuple:
"""simple docstring"""
a = torch_device == '''cpu'''
a = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , )
def __lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = 1
a = 2
a = self.get_dummy_inputs(__UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
a = batch_size * [inputs[key]]
a = pipe(**__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
a = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
a = pipe(
__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
| 26 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
a = tempfile.mkdtemp()
a = BlipImageProcessor()
a = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
a = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
a = InstructBlipProcessor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : Optional[Any] , **__UpperCAmelCase : Tuple ) ->List[str]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer
def __lowerCAmelCase ( self : int , **__UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor
def __lowerCAmelCase ( self : Optional[Any] , **__UpperCAmelCase : Any ) ->Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).qformer_tokenizer
def __lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
a = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
a = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
a = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , __UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = self.prepare_image_inputs()
a = image_processor(__UpperCAmelCase , return_tensors='''np''' )
a = processor(images=__UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = processor(text=__UpperCAmelCase )
a = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
a = qformer_tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def __lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def __lowerCAmelCase ( self : Dict ) ->List[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__UpperCAmelCase )
a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) ->str:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
| 26 | 1 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
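# Quick check (editorial): with duplicates, bisect_left returns the first matching
# index while bisect_right returns one past the last match:
#   bisect_left([1, 2, 2, 2, 3], 2)  == 1
#   bisect_right([1, 2, 2, 2, 3], 2) == 4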
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by comma:\n").strip()
UpperCAmelCase__ = sorted(int(item) for item in user_input.split(","))
UpperCAmelCase__ = int(input("Enter a single number to be found in the list:\n"))
UpperCAmelCase__ = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 26 |
import math
def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
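# Worked example (editorial): for n = 10 the sum of squares is 385 and the square of
# the sum is 55**2 = 3025, so solution(10) == 3025 - 385 == 2640.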
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError('''days_between_payments must be > 0''')
    if daily_interest_rate < 0:
        raise ValueError('''daily_interest_rate must be >= 0''')
    if principal <= 0:
        raise ValueError('''principal must be > 0''')
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError('''number_of_compounding_periods must be > 0''')
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''')
    if principal <= 0:
        raise ValueError('''principal must be > 0''')
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError('''number_of_years must be > 0''')
    if nominal_annual_percentage_rate < 0:
        raise ValueError('''nominal_annual_percentage_rate must be >= 0''')
    if principal <= 0:
        raise ValueError('''principal must be > 0''')
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
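# Worked example (editorial): compound_interest(10_000, 0.05, 3) returns
# 10_000 * (1.05**3 - 1) = 1576.25, i.e. the interest earned, not the final balance.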
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
def solution(n: int = 600_851_475_143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
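# Worked example (editorial): 13195 = 5 * 7 * 13 * 29, so solution(13195) == 29;
# the loop divides out each prime factor in turn and `ans` ends up as the largest one.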
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = BarthezTokenizer
__snake_case = BarthezTokenizerFast
__snake_case = True
__snake_case = True
def __lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
super().setUp()
a = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__UpperCAmelCase )
a = tokenizer
def __lowerCAmelCase ( self : List[str] ) ->Tuple:
"""simple docstring"""
a = '''<pad>'''
a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(__UpperCAmelCase ) , 101_122 )
def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 101_122 )
@require_torch
def __lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
a = [0, 57, 3_018, 70_307, 91, 2]
a = self.tokenizer(
__UpperCAmelCase , max_length=len(__UpperCAmelCase ) , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
a = batch.input_ids.tolist()[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = '''I was born in 92000, and this is falsé.'''
a = tokenizer.tokenize(__UpperCAmelCase )
a = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
a = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
a = self.get_rust_tokenizer()
a = tokenizer.encode(__UpperCAmelCase )
a = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def __lowerCAmelCase ( self : Optional[int] ) ->Any:
"""simple docstring"""
a = {'''input_ids''': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
a = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=__UpperCAmelCase , )
| 26 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
UpperCAmelCase__ = "bart"
UpperCAmelCase__ = True
@st.cache(allow_output_mutation=a )
def _a ( ) -> Tuple:
if LOAD_DENSE_INDEX:
a = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
a = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
a = qar_model.eval()
else:
a , a = (None, None)
if MODEL_TYPE == "bart":
a = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
a = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
a = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
a = sas_model.eval()
else:
a , a = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=a )
def _a ( ) -> Dict:
if LOAD_DENSE_INDEX:
a = faiss.StandardGpuResources()
a = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
a = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
a = faiss.IndexFlatIP(128 )
a = faiss.index_cpu_to_gpu(a , 1 , a )
wikiaab_gpu_index_flat.add(a ) # TODO fix for larger GPU
else:
a , a = (None, None)
a = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=a )
def _a ( ) -> Optional[int]:
a = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
a = elia['''train_eli5''']
a = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
a = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(a )
return (elia_train, eli5_train_q_index)
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = load_indexes()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = load_models()
UpperCAmelCase__ , UpperCAmelCase__ = load_train_data()
def _a ( a :str , a :Tuple=10 ) -> List[str]:
a = embed_questions_for_retrieval([question] , a , a )
a , a = eli5_train_q_index.search(a , a )
a = [elia_train[int(a )] for i in I[0]]
return nn_examples
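# Note (editorial): eli5_train_q_index is an IndexFlatIP, so `search` performs exact
# maximum-inner-product search and returns the training questions whose embeddings
# score highest against the query embedding.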
def _a ( a :str , a :Any="wiki40b" , a :int="dense" , a :Union[str, Any]=10 ) -> List[str]:
if source == "none":
a , a = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
a , a = query_qa_dense_index(
a , a , a , a , a , a )
else:
a , a = query_es_index(
a , a , index_name='''english_wiki40b_snippets_100w''' , n_results=a , )
a = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
a = '''question: {} context: {}'''.format(a , a )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda a : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda a : None),
} )
def _a ( a :Tuple , a :int , a :int , a :Dict=64 , a :List[Any]=256 , a :List[Any]=False , a :List[Any]=2 , a :Tuple=0.95 , a :Optional[Any]=0.8 ) -> int:
with torch.no_grad():
a = qa_sas_generate(
a , a , a , num_answers=1 , num_beams=a , min_len=a , max_len=a , do_sample=a , temp=a , top_p=a , top_k=a , max_input_length=1_024 , device='''cuda:0''' , )[0]
return (answer, support_list)
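# Note (editorial): the hash_funcs above map torch.Tensor and the BART tokenizer to a
# constant, which tells st.cache not to hash those expensive / unhashable arguments;
# only the remaining arguments participate in the cache key.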
st.title("Long Form Question Answering with ELI5")
# Start sidebar
UpperCAmelCase__ = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
UpperCAmelCase__ = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCAmelCase__ = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCAmelCase__ = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
UpperCAmelCase__ = st.sidebar.checkbox("Demo options")
if demo_options:
UpperCAmelCase__ = st.sidebar.selectbox(
"",
action_list,
index=3,
)
UpperCAmelCase__ = action_list.index(action_st)
UpperCAmelCase__ = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
UpperCAmelCase__ = show_type == "Show full text of passages"
else:
UpperCAmelCase__ = 3
UpperCAmelCase__ = True
UpperCAmelCase__ = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
UpperCAmelCase__ = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
UpperCAmelCase__ = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
UpperCAmelCase__ = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
UpperCAmelCase__ = "wiki40b"
UpperCAmelCase__ = "dense"
UpperCAmelCase__ = "beam"
UpperCAmelCase__ = 2
UpperCAmelCase__ = 64
UpperCAmelCase__ = 256
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = st.sidebar.checkbox("Generation options")
if generate_options:
UpperCAmelCase__ = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
UpperCAmelCase__ = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
UpperCAmelCase__ = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCAmelCase__ = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCAmelCase__ = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCAmelCase__ = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCAmelCase__ = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCAmelCase__ = None
# start main text
UpperCAmelCase__ = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
UpperCAmelCase__ = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCAmelCase__ = st.text_input("Enter your question here:", "")
else:
UpperCAmelCase__ = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method="dense", n_results=10)
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method="sparse", n_results=10)
UpperCAmelCase__ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCAmelCase__ = support_list[:10]
UpperCAmelCase__ = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
UpperCAmelCase__ , UpperCAmelCase__ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCAmelCase__ , UpperCAmelCase__ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
UpperCAmelCase__ = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
UpperCAmelCase__ = res[1].strip()
if sec_titles == "":
UpperCAmelCase__ = "[{}]({})".format(res[0], wiki_url)
else:
UpperCAmelCase__ = sec_titles.split(" & ")
UpperCAmelCase__ = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
UpperCAmelCase__ = find_nearest_training(question)
UpperCAmelCase__ = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
UpperCAmelCase__ = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
UpperCAmelCase__ = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 1 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
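# Sanity check (editorial): for f(x) = x on [0, 1] every trapezoid is exact, so
# trapezoidal_area(lambda x: x, 0, 1, 10) returns 0.5 regardless of the step count.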
if __name__ == "__main__":
    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
        i *= 10
| 26 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ = "▁"
UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = BertGenerationTokenizer
__snake_case = False
__snake_case = True
def __lowerCAmelCase ( self : str ) ->str:
"""simple docstring"""
super().setUp()
a = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
a = '''<s>'''
a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def __lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(__UpperCAmelCase ) , 1_002 )
def __lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def __lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
a = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
a = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
a = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
a = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def __lowerCAmelCase ( self : Any ) ->str:
"""simple docstring"""
a = '''Hello World!'''
a = [18_536, 2_260, 101]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def __lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
a = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
a = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@require_torch
@slow
def __lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
a = list(self.big_tokenizer.get_vocab().keys() )[:10]
a = ''' '''.join(__UpperCAmelCase )
a = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors='''pt''' , return_token_type_ids=__UpperCAmelCase )
a = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=__UpperCAmelCase )
a = BertGenerationConfig()
a = BertGenerationEncoder(__UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCAmelCase )
model(**__UpperCAmelCase )
@slow
def __lowerCAmelCase ( self : str ) ->Optional[Any]:
"""simple docstring"""
a = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
| 26 | 1 |
def remove_digit(num: int) -> int:
    if not isinstance(num, int):
        raise TypeError('''only integers accepted as input''')
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions)
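# Worked example (editorial): remove_digit(152) tries 52, 12 and 15 and returns 52;
# the sign is dropped via abs(), so remove_digit(-161) returns 61.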
if __name__ == "__main__":
__import__("doctest").testmod()
| 26 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint['''input_conv.weight_g''']
    hf_model.conv_pre.weight_v.data = checkpoint['''input_conv.weight_v''']
    hf_model.conv_pre.bias.data = checkpoint['''input_conv.bias''']

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[F"""upsamples.{i}.1.weight_g"""]
        hf_model.upsampler[i].weight_v.data = checkpoint[F"""upsamples.{i}.1.weight_v"""]
        hf_model.upsampler[i].bias.data = checkpoint[F"""upsamples.{i}.1.bias"""]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]

    hf_model.conv_post.weight_g.data = checkpoint['''output_conv.1.weight_g''']
    hf_model.conv_post.weight_v.data = checkpoint['''output_conv.1.weight_v''']
    hf_model.conv_post.bias.data = checkpoint['''output_conv.1.bias''']

    hf_model.remove_weight_norm()
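# Note (editorial): apply_weight_norm() re-parameterizes each conv weight as the pair
# (weight_g, weight_v), matching the *.weight_g / *.weight_v tensors stored in the
# original checkpoint; remove_weight_norm() then folds the pair back into a single
# weight tensor before the model is saved.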
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['''model''']['''generator'''], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('''Pushing to the hub...''')
        model.push_to_hub(repo_id)
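# Example invocation (editorial; the script and file names are placeholders):
#   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan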
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
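# Note (editorial): assigning a _LazyModule into sys.modules[__name__] defers the heavy
# torch-backed imports until one of the listed attributes is first accessed, while the
# TYPE_CHECKING branch keeps static type checkers aware of the real symbols.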
| 26 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / '''file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20
        ''')
    with open(filename, '''w''') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / '''malformed_file.csv'''
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20,
        ''')
    with open(filename, '''w''') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / '''csv_with_image.csv'''
    data = textwrap.dedent(
        F"""\
        image
        {image_file}
        """)
    with open(filename, '''w''') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / '''csv_with_label.csv'''
    data = textwrap.dedent(
        '''\
        label
        good
        bad
        good
        ''')
    with open(filename, '''w''') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / '''csv_with_int_list.csv'''
    data = textwrap.dedent(
        '''\
        int_list
        1 2 3
        4 5 6
        7 8 9
        ''')
    with open(filename, '''w''') as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match='''Error tokenizing data'''):
        for _ in generator:
            pass
    assert any(
        record.levelname == '''ERROR'''
        and '''Failed to read file''' in record.message
        and os.path.basename(csv_file) in record.message
        for record in caplog.records)


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding='''utf-8''') as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding='''utf-8''', features=Features({'''image''': Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field('''image''').type == Image()()
    generated_content = pa_table.to_pydict()['''image''']
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding='''utf-8''') as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding='''utf-8''', features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field('''label''').type == ClassLabel(names=['''good''', '''bad'''])()
    generated_content = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad''']).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding='''utf-8''', sep=''',''', converters={'''int_list''': lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field('''int_list''').type)
    generated_content = pa_table.to_pydict()['''int_list''']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 26 | 1 |
import tensorflow as tf
from ...tf_utils import shape_list
class lowercase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any]=1 , __UpperCAmelCase : Any=False , **__UpperCAmelCase : str ) ->Optional[int]:
"""simple docstring"""
super().__init__(**__UpperCAmelCase )
a = vocab_size
a = d_embed
a = d_proj
a = cutoffs + [vocab_size]
a = [0] + self.cutoffs
a = div_val
a = self.cutoffs[0]
a = len(self.cutoffs ) - 1
a = self.shortlist_size + self.n_clusters
a = keep_order
a = []
a = []
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
if self.n_clusters > 0:
a = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=__UpperCAmelCase , name='''cluster_weight''' )
a = self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=__UpperCAmelCase , name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
a = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=__UpperCAmelCase , name=F"""out_projs_._{i}""" , )
self.out_projs.append(__UpperCAmelCase )
else:
self.out_projs.append(__UpperCAmelCase )
a = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=__UpperCAmelCase , name=F"""out_layers_._{i}_._weight""" , )
a = self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=__UpperCAmelCase , name=F"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
a , a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
a = self.d_embed // (self.div_val**i)
a = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=__UpperCAmelCase , name=F"""out_projs_._{i}""" )
self.out_projs.append(__UpperCAmelCase )
a = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=__UpperCAmelCase , name=F"""out_layers_._{i}_._weight""" , )
a = self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=__UpperCAmelCase , name=F"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
super().build(__UpperCAmelCase )
@staticmethod
def __lowerCAmelCase ( __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple=None ) ->Dict:
"""simple docstring"""
a = x
if proj is not None:
a = tf.einsum('''ibd,ed->ibe''' , __UpperCAmelCase , __UpperCAmelCase )
return tf.einsum('''ibd,nd->ibn''' , __UpperCAmelCase , __UpperCAmelCase ) + b
@staticmethod
def __lowerCAmelCase ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : int ) ->List[str]:
"""simple docstring"""
a = shape_list(__UpperCAmelCase )
a = tf.range(lp_size[0] , dtype=target.dtype )
a = tf.stack([r, target] , 1 )
return tf.gather_nd(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Union[str, Any]=False ) ->str:
"""simple docstring"""
a = 0
if self.n_clusters == 0:
a = self._logit(__UpperCAmelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
a = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__UpperCAmelCase , logits=__UpperCAmelCase )
a = tf.nn.log_softmax(__UpperCAmelCase , axis=-1 )
else:
a = shape_list(__UpperCAmelCase )
a = []
a = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
a , a = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
a = (target >= l_idx) & (target < r_idx)
a = tf.where(__UpperCAmelCase )
a = tf.boolean_mask(__UpperCAmelCase , __UpperCAmelCase ) - l_idx
if self.div_val == 1:
a = self.out_layers[0][0][l_idx:r_idx]
a = self.out_layers[0][1][l_idx:r_idx]
else:
a = self.out_layers[i][0]
a = self.out_layers[i][1]
if i == 0:
a = tf.concat([cur_W, self.cluster_weight] , 0 )
a = tf.concat([cur_b, self.cluster_bias] , 0 )
a = self._logit(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.out_projs[0] )
a = tf.nn.log_softmax(__UpperCAmelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
a = tf.boolean_mask(__UpperCAmelCase , __UpperCAmelCase )
a = self._gather_logprob(__UpperCAmelCase , __UpperCAmelCase )
else:
a = self._logit(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.out_projs[i] )
a = tf.nn.log_softmax(__UpperCAmelCase )
a = self.cutoffs[0] + i - 1 # No probability for the head cluster
a = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__UpperCAmelCase )
if target is not None:
a = tf.boolean_mask(__UpperCAmelCase , __UpperCAmelCase )
a = tf.boolean_mask(__UpperCAmelCase , __UpperCAmelCase )
a = self._gather_logprob(__UpperCAmelCase , __UpperCAmelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__UpperCAmelCase , -cur_logprob , shape_list(__UpperCAmelCase ) )
a = tf.concat(__UpperCAmelCase , axis=-1 )
if target is not None:
if return_mean:
a = tf.reduce_mean(__UpperCAmelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__UpperCAmelCase )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(__UpperCAmelCase , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
| 26 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = torch.device("cpu")
def _a ( ) -> Union[str, Any]:
a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
a = Image.open(requests.get(a , stream=a ).raw )
return im
def _a ( a :Dict ) -> Tuple:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def _a ( a :int , a :Any , a :Union[str, Any] ) -> int:
a = dct.pop(a )
a = val
def _a ( a :Any ) -> Dict:
a = []
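    # Translate the original SwiftFormer checkpoint key names into the HuggingFace module layout.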
for k in state_dict.keys():
a = k
if ".pwconv" in k:
a = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
if ".dwconv" in k:
a = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
if ".Proj." in k:
a = k_new.replace('''.Proj.''' , '''.proj.''' )
if "patch_embed" in k_new:
a = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
if "network" in k_new:
a = k_new.split('''.''' )
if ls[2].isdigit():
a = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
else:
a = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def _a ( a :List[Any] , a :Tuple , a :List[str] ) -> Union[str, Any]:
a = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
a = 1_000
a = '''huggingface/label-files'''
a = '''imagenet-1k-id2label.json'''
a = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) )
a = {int(a ): v for k, v in idalabel.items()}
a = idalabel
a = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a = [3, 3, 6, 4]
a = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
a = [3, 3, 9, 6]
a = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
a = [4, 3, 10, 5]
a = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
a = [4, 4, 12, 6]
a = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('''https''' ):
a = torch.hub.load_state_dict_from_url(a , map_location='''cpu''' , check_hash=a )
else:
a = torch.load(a , map_location='''cpu''' )
a = checkpoint
a = create_rename_keys(a )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(a , a , a )
# load HuggingFace model
a = SwiftFormerForImageClassification(a ).eval()
hf_model.load_state_dict(a )
# prepare test inputs
a = prepare_img()
a = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
a = processor(images=a , return_tensors='''pt''' )
# compare outputs from both models
a = get_expected_output(a )
a = hf_model(inputs['''pixel_values'''] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , a , atol=1e-3 )
Path(a ).mkdir(exist_ok=a )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
UpperCAmelCase__ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 26 | 1 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def _a ( a :Union[str, Any] ) -> Any:
if not is_accelerate_available():
return method
a = version.parse(accelerate.__version__ ).base_version
if version.parse(a ) < version.parse('''0.17.0''' ):
return method
def wrapper(self :str , *a :Dict , **a :Tuple ):
if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
self._hf_hook.pre_forward(self )
return method(self , *a , **a )
return wrapper
| 26 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __UpperCAmelCase : UNetaDModel , __UpperCAmelCase : UNetaDModel , __UpperCAmelCase : DDPMScheduler , __UpperCAmelCase : Optional[int] , ) ->List[str]:
"""simple docstring"""
super().__init__()
a = value_function
a = unet
a = scheduler
a = env
a = env.get_dataset()
a = {}
for key in self.data.keys():
try:
a = self.data[key].mean()
except: # noqa: E722
pass
a = {}
for key in self.data.keys():
try:
a = self.data[key].std()
except: # noqa: E722
pass
a = env.observation_space.shape[0]
a = env.action_space.shape[0]
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ) ->Dict:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) ->List[str]:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def __lowerCAmelCase ( self : int , __UpperCAmelCase : int ) ->List[str]:
"""simple docstring"""
if type(__UpperCAmelCase ) is dict:
return {k: self.to_torch(__UpperCAmelCase ) for k, v in x_in.items()}
elif torch.is_tensor(__UpperCAmelCase ):
return x_in.to(self.unet.device )
return torch.tensor(__UpperCAmelCase , device=self.unet.device )
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple ) ->int:
"""simple docstring"""
for key, val in cond.items():
a = val.clone()
return x_in
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = x.shape[0]
a = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
a = torch.full((batch_size,) , __UpperCAmelCase , device=self.unet.device , dtype=torch.long )
for _ in range(__UpperCAmelCase ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
a = self.value_function(x.permute(0 , 2 , 1 ) , __UpperCAmelCase ).sample
a = torch.autograd.grad([y.sum()] , [x] )[0]
a = self.scheduler._get_variance(__UpperCAmelCase )
a = torch.exp(0.5 * posterior_variance )
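                    # Scale the value gradient by the posterior standard deviation so guidance strength tracks the noise level.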
a = model_std * grad
a = 0
a = x.detach()
a = x + scale * grad
a = self.reset_xa(__UpperCAmelCase , __UpperCAmelCase , self.action_dim )
a = self.unet(x.permute(0 , 2 , 1 ) , __UpperCAmelCase ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
a = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , predict_epsilon=__UpperCAmelCase )['''prev_sample''']
# apply conditions to the trajectory (set the initial state)
a = self.reset_xa(__UpperCAmelCase , __UpperCAmelCase , self.action_dim )
a = self.to_torch(__UpperCAmelCase )
return x, y
def __call__( self : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int]=64 , __UpperCAmelCase : int=32 , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : str=0.1 ) ->List[str]:
"""simple docstring"""
a = self.normalize(__UpperCAmelCase , '''observations''' )
a = obs[None].repeat(__UpperCAmelCase , axis=0 )
a = {0: self.to_torch(__UpperCAmelCase )}
a = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
a = randn_tensor(__UpperCAmelCase , device=self.unet.device )
a = self.reset_xa(__UpperCAmelCase , __UpperCAmelCase , self.action_dim )
a = self.to_torch(__UpperCAmelCase )
# run the diffusion process
a , a = self.run_diffusion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# sort output trajectories by value
a = y.argsort(0 , descending=__UpperCAmelCase ).squeeze()
a = x[sorted_idx]
a = sorted_values[:, :, : self.action_dim]
a = actions.detach().cpu().numpy()
a = self.de_normalize(__UpperCAmelCase , key='''actions''' )
# select the action with the highest value
if y is not None:
a = 0
else:
# if we didn't run value guiding, select a random action
a = np.random.randint(0 , __UpperCAmelCase )
a = denorm_actions[selected_index, 0]
return denorm_actions
| 26 | 1 |
def _a ( a :str ) -> list:
if n_term == "":
return []
a = []
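    # The first term is rendered as "1"; every later term k is rendered as the string "1/k".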
for temp in range(int(a ) ):
series.append(F"""1/{temp + 1}""" if series else '''1''' )
return series
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
| 26 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model"}
UpperCAmelCase__ = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class lowercase_ ( lowercase ):
'''simple docstring'''
def __init__( self : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : List[str]="<s>" , __UpperCAmelCase : int="</s>" , __UpperCAmelCase : Any="<unk>" , __UpperCAmelCase : Optional[Any]="<sep>" , __UpperCAmelCase : int="<pad>" , __UpperCAmelCase : Any="<cls>" , __UpperCAmelCase : List[str]="<mask>" , __UpperCAmelCase : Optional[int]=["<eop>", "<eod>"] , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : Union[str, Any] , ) ->None:
"""simple docstring"""
a = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
a = 3
a = do_lower_case
a = remove_space
a = keep_accents
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
'''See https://pypi.org/project/jieba/ for installation.''' )
a = jieba
a = str.maketrans(''' \n''' , '''\u2582\u2583''' )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
return len(self.sp_model )
def __lowerCAmelCase ( self : Tuple ) ->List[str]:
"""simple docstring"""
a = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
a = self.__dict__.copy()
a = None
return state
def __setstate__( self : List[str] , __UpperCAmelCase : Optional[int] ) ->str:
"""simple docstring"""
a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[str] ) ->List[str]:
"""simple docstring"""
if self.remove_space:
a = ''' '''.join(inputs.strip().split() )
else:
a = inputs
a = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
a = unicodedata.normalize('''NFKD''' , __UpperCAmelCase )
a = ''''''.join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
a = outputs.lower()
return outputs
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
a = self.preprocess_text(__UpperCAmelCase )
a = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
a = []
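        # Re-split pieces that end in a digit followed by a comma so numbers tokenize consistently.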
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
a = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
a = cur_pieces[1:]
else:
a = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Any ) ->Any:
"""simple docstring"""
return self.sp_model.PieceToId(__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Dict ) ->Union[str, Any]:
"""simple docstring"""
return self.sp_model.IdToPiece(__UpperCAmelCase )
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
a = ''''''.join(__UpperCAmelCase ).replace(__UpperCAmelCase , ''' ''' ).strip()
return out_string
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
a = [self.sep_token_id]
a = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
def __lowerCAmelCase ( self : Any , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = super()._decode(*__UpperCAmelCase , **__UpperCAmelCase )
a = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' )
return text
| 26 | 1 |
from __future__ import annotations
import typing
from collections import Counter
def _a ( a :int ) -> typing.Counter[int]:
a = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(a , max_perimeter + 1 ):
a = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(a ):
a = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _a ( a :int = 1_000 ) -> int:
a = pythagorean_triple(a )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 26 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def _a ( a :Union[str, Any] , a :List[Any] ) -> List[Any]:
a = checkpoint
a = {}
a = vae_state_dict['''encoder.conv_in.weight''']
a = vae_state_dict['''encoder.conv_in.bias''']
a = vae_state_dict['''encoder.conv_out.weight''']
a = vae_state_dict['''encoder.conv_out.bias''']
a = vae_state_dict['''encoder.norm_out.weight''']
a = vae_state_dict['''encoder.norm_out.bias''']
a = vae_state_dict['''decoder.conv_in.weight''']
a = vae_state_dict['''decoder.conv_in.bias''']
a = vae_state_dict['''decoder.conv_out.weight''']
a = vae_state_dict['''decoder.conv_out.bias''']
a = vae_state_dict['''decoder.norm_out.weight''']
a = vae_state_dict['''decoder.norm_out.bias''']
a = vae_state_dict['''quant_conv.weight''']
a = vae_state_dict['''quant_conv.bias''']
a = vae_state_dict['''post_quant_conv.weight''']
a = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
a = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
a = {
layer_id: [key for key in vae_state_dict if F"""down.{layer_id}""" in key] for layer_id in range(a )
}
# Retrieves the keys for the decoder up blocks only
a = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
a = {
layer_id: [key for key in vae_state_dict if F"""up.{layer_id}""" in key] for layer_id in range(a )
}
for i in range(a ):
a = [key for key in down_blocks[i] if F"""down.{i}""" in key and F"""down.{i}.downsample""" not in key]
if F"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
a = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.weight""" )
a = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.bias""" )
a = renew_vae_resnet_paths(a )
a = {'''old''': F"""down.{i}.block""", '''new''': F"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
a = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
a = 2
for i in range(1 , num_mid_res_blocks + 1 ):
a = [key for key in mid_resnets if F"""encoder.mid.block_{i}""" in key]
a = renew_vae_resnet_paths(a )
a = {'''old''': F"""mid.block_{i}""", '''new''': F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
a = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
a = renew_vae_attention_paths(a )
a = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
conv_attn_to_linear(a )
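    # Decoder up blocks are stored in reverse order in the original checkpoint, hence block_id = num_up_blocks - 1 - i.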
for i in range(a ):
a = num_up_blocks - 1 - i
a = [
key for key in up_blocks[block_id] if F"""up.{block_id}""" in key and F"""up.{block_id}.upsample""" not in key
]
if F"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
a = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.weight"""
]
a = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.bias"""
]
a = renew_vae_resnet_paths(a )
a = {'''old''': F"""up.{block_id}.block""", '''new''': F"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
a = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
a = 2
for i in range(1 , num_mid_res_blocks + 1 ):
a = [key for key in mid_resnets if F"""decoder.mid.block_{i}""" in key]
a = renew_vae_resnet_paths(a )
a = {'''old''': F"""mid.block_{i}""", '''new''': F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
a = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
a = renew_vae_attention_paths(a )
a = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
conv_attn_to_linear(a )
return new_checkpoint
def _a ( a :str , a :str , ) -> List[str]:
# Only support V1
a = requests.get(
''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
a = io.BytesIO(r.content )
a = OmegaConf.load(a )
a = 512
a = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if checkpoint_path.endswith('''safetensors''' ):
from safetensors import safe_open
a = {}
with safe_open(a , framework='''pt''' , device='''cpu''' ) as f:
for key in f.keys():
a = f.get_tensor(a )
else:
a = torch.load(a , map_location=a )['''state_dict''']
# Convert the VAE model.
a = create_vae_diffusers_config(a , image_size=a )
a = custom_convert_ldm_vae_checkpoint(a , a )
a = AutoencoderKL(**a )
vae.load_state_dict(a )
vae.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
UpperCAmelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 26 | 1 |
def _a ( a :list ) -> list:
if len(a ) <= 1:
return lst
a = 1
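    # Walk forward while adjacent elements are in order; otherwise swap them and step back one position.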
while i < len(a ):
if lst[i - 1] <= lst[i]:
i += 1
else:
a , a = lst[i], lst[i - 1]
i -= 1
if i == 0:
a = 1
return lst
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 26 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = ['''image_processor''', '''tokenizer''']
__snake_case = '''CLIPImageProcessor'''
__snake_case = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Dict , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : Optional[Any] ) ->List[str]:
"""simple docstring"""
a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __UpperCAmelCase , )
a = kwargs.pop('''feature_extractor''' )
a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self : List[str] , __UpperCAmelCase : Any=None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Any=None , **__UpperCAmelCase : str ) ->Optional[Any]:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
a = self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if images is not None:
a = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
a = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple , *__UpperCAmelCase : str , **__UpperCAmelCase : Tuple ) ->Any:
"""simple docstring"""
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def __lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
a = self.tokenizer.model_input_names
a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __UpperCAmelCase , )
return self.image_processor_class
@property
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __UpperCAmelCase , )
return self.image_processor
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
UpperCAmelCase__ = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = VOCAB_FILES_NAMES
__snake_case = PRETRAINED_VOCAB_FILES_MAP
__snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case = PRETRAINED_INIT_CONFIGURATION
__snake_case = ['''input_ids''', '''attention_mask''']
__snake_case = DistilBertTokenizer
def __init__( self : Dict , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[int]="[UNK]" , __UpperCAmelCase : str="[SEP]" , __UpperCAmelCase : Tuple="[PAD]" , __UpperCAmelCase : Any="[CLS]" , __UpperCAmelCase : int="[MASK]" , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : str , ) ->Optional[int]:
"""simple docstring"""
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , )
a = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __UpperCAmelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __UpperCAmelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __UpperCAmelCase ) != tokenize_chinese_chars
):
a = getattr(__UpperCAmelCase , normalizer_state.pop('''type''' ) )
a = do_lower_case
a = strip_accents
a = tokenize_chinese_chars
a = normalizer_class(**__UpperCAmelCase )
a = do_lower_case
def __lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int]=None ) ->Optional[Any]:
"""simple docstring"""
a = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) ->Tuple[str]:
"""simple docstring"""
a = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
| 26 | 1 |
import os
from collections.abc import Iterator
def _a ( a :str = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(a ):
a = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(a )[1] in (".py", ".ipynb"):
yield os.path.join(a , a ).lstrip('''./''' )
def _a ( a :Tuple ) -> Any:
return F"""{i * ' '}*""" if i else "\n##"
def _a ( a :str , a :str ) -> str:
a = old_path.split(os.sep )
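    # Print a heading for every path component that differs from the previously printed path.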
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(a ) or old_parts[i] != new_part) and new_part:
print(F"""{md_prefix(a )} {new_part.replace('_' , ' ' ).title()}""" )
return new_path
def _a ( a :str = "." ) -> None:
a = ''''''
for filepath in sorted(good_file_paths(a ) ):
a , a = os.path.split(a )
if filepath != old_path:
a = print_path(a , a )
a = (filepath.count(os.sep ) + 1) if filepath else 0
a = F"""{filepath}/{filename}""".replace(''' ''' , '''%20''' )
a = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
print(F"""{md_prefix(a )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md(".")
| 26 |
from __future__ import annotations
import typing
from collections import Counter
def _a ( a :int ) -> typing.Counter[int]:
a = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(a , max_perimeter + 1 ):
a = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(a ):
a = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _a ( a :int = 1_000 ) -> int:
a = pythagorean_triple(a )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 26 | 1 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = '''efficientnet'''
def __init__( self : Dict , __UpperCAmelCase : int = 3 , __UpperCAmelCase : int = 600 , __UpperCAmelCase : float = 2.0 , __UpperCAmelCase : float = 3.1 , __UpperCAmelCase : int = 8 , __UpperCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __UpperCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , __UpperCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , __UpperCAmelCase : List[int] = [] , __UpperCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __UpperCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __UpperCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __UpperCAmelCase : float = 0.25 , __UpperCAmelCase : str = "swish" , __UpperCAmelCase : int = 2_560 , __UpperCAmelCase : str = "mean" , __UpperCAmelCase : float = 0.02 , __UpperCAmelCase : float = 0.001 , __UpperCAmelCase : float = 0.99 , __UpperCAmelCase : float = 0.5 , __UpperCAmelCase : float = 0.2 , **__UpperCAmelCase : List[str] , ) ->Any:
"""simple docstring"""
super().__init__(**__UpperCAmelCase )
a = num_channels
a = image_size
a = width_coefficient
a = depth_coefficient
a = depth_divisor
a = kernel_sizes
a = in_channels
a = out_channels
a = depthwise_padding
a = strides
a = num_block_repeats
a = expand_ratios
a = squeeze_expansion_ratio
a = hidden_act
a = hidden_dim
a = pooling_type
a = initializer_range
a = batch_norm_eps
a = batch_norm_momentum
a = dropout_rate
a = drop_connect_rate
a = sum(__UpperCAmelCase ) * 4
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = version.parse('''1.11''' )
@property
def __lowerCAmelCase ( self : List[Any] ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __lowerCAmelCase ( self : Any ) ->float:
"""simple docstring"""
return 1e-5
| 26 |
from __future__ import annotations
def _a ( a :dict , a :str ) -> set[str]:
a , a = set(a ), [start]
while stack:
a = stack.pop()
explored.add(a )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(a )
return explored
UpperCAmelCase__ = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 26 | 1 |
from numpy import exp, pi, sqrt
def _a ( a :Optional[int] , a :float = 0.0 , a :float = 1.0 ) -> int:
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
UpperCAmelCase__ = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
UpperCAmelCase__ = 10
UpperCAmelCase__ = 256
def _a ( a :List[str] ) -> Optional[MinHash]:
if len(a ) < MIN_NUM_TOKENS:
return None
a = MinHash(num_perm=a )
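    # Only distinct tokens contribute: the MinHash signature is computed over the token set.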
for token in set(a ):
min_hash.update(token.encode() )
return min_hash
def _a ( a :str ) -> Set[str]:
return {t for t in NON_ALPHA.split(a ) if len(t.strip() ) > 0}
class lowercase_ :
'''simple docstring'''
    def __init__( self : Any , * , __UpperCAmelCase : float = 0.85 , ) ->Dict:
"""simple docstring"""
a = duplication_jaccard_threshold
a = NUM_PERM
a = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
a = defaultdict(__UpperCAmelCase )
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : MinHash ) ->None:
"""simple docstring"""
a = self._index.query(__UpperCAmelCase )
if code_key in self._index.keys:
print(F"""Duplicate key {code_key}""" )
return
self._index.insert(__UpperCAmelCase , __UpperCAmelCase )
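        # Attach the new file to the first near-duplicate that already heads a cluster, or seed a new cluster from the closest match.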
if len(__UpperCAmelCase ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(__UpperCAmelCase )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(__UpperCAmelCase )
def __lowerCAmelCase ( self : Dict ) ->List[List[Dict]]:
"""simple docstring"""
a = []
for base, duplicates in self._duplicate_clusters.items():
a = [base] + list(__UpperCAmelCase )
# reformat the cluster to be a list of dict
a = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(__UpperCAmelCase )
return duplicate_clusters
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : Dict ) ->None:
"""simple docstring"""
a = self.get_duplicate_clusters()
with open(__UpperCAmelCase , '''w''' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
def _a ( a :List[Any] ) -> List[Any]:
a , a = element
a = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _a ( a :Type[Dataset] ) -> List[Any]:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(a , max_queue_size=10_000 ) , chunksize=100 , ):
if data is not None:
yield data
def _a ( a :Type[Dataset] , a :float ) -> str:
a = DuplicationIndex(duplication_jaccard_threshold=a )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(a ) ) , max_queue_size=100 ) ):
di.add(a , a )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def _a ( a :str , a :str ) -> float:
a = get_tokens(a )
a = get_tokens(a )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
UpperCAmelCase__ = None
def _a ( a :Tuple , a :Tuple ) -> Any:
a = []
for elementa in cluster:
a = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
a = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(a , a ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
a = 1
extremes.append(a )
return extremes
def _a ( a :List[Any] , a :Optional[Any] , a :Union[str, Any] ) -> Optional[int]:
global _shared_dataset
a = dataset
a = []
a = partial(_find_cluster_extremes_shared , jaccard_threshold=a )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
a , a , ) , total=len(a ) , ):
extremes_list.append(a )
return extremes_list
def _a ( a :Type[Dataset] , a :float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
a = make_duplicate_clusters(a , a )
a = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
a = {}
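    # Keep one representative ("extreme") per duplicate cluster; all other members within the Jaccard threshold are removed.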
a = find_extremes(a , a , a )
for extremes in extremes_clusters:
for element in extremes:
a = element
a = duplicate_indices - set(extreme_dict.keys() )
a = dataset.filter(lambda a , a : idx not in remove_indices , with_indices=a )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
a = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
a = extreme_dict[element['''base_index''']]['''copies''']
print(F"""Original dataset size: {len(a )}""" )
print(F"""Number of duplicate clusters: {len(a )}""" )
print(F"""Files in duplicate cluster: {len(a )}""" )
print(F"""Unique files in duplicate cluster: {len(a )}""" )
print(F"""Filtered dataset size: {len(a )}""" )
return ds_filter, duplicate_clusters
| 26 | 1 |
def _a ( a :int ) -> bool:
if number < 0:
raise ValueError('''number must not be negative''' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
from math import ceil, sqrt
def _a ( a :int = 1_000_000 ) -> int:
a = 0
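    # For each outer square width, count the hole widths of matching parity that keep the tile count within the limit.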
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
a = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
a = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
from __future__ import annotations
def _a ( a :list[int | str] ) -> None:
create_state_space_tree(a , [] , 0 , [0 for i in range(len(a ) )] )
def _a ( a :list[int | str] , a :list[int | str] , a :int , a :list[int] , ) -> None:
if index == len(a ):
print(a )
return
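    # Try each unused element, recurse one level deeper, then undo the choice (classic backtracking).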
for i in range(len(a ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
a = True
create_state_space_tree(a , a , index + 1 , a )
current_sequence.pop()
a = False
UpperCAmelCase__ = [3, 1, 2, 4]
generate_all_permutations(sequence)
UpperCAmelCase__ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 26 |
UpperCAmelCase__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 26 | 1 |